Compare commits: main...v0.0.4

No commits in common. "main" and "v0.0.4" have entirely different histories.

1495 changed files with 54985 additions and 469897 deletions


@@ -6,4 +6,3 @@
 /cover.*
 /.go/
 /tmp/
-/.git/


@@ -11,29 +11,12 @@ jobs:
         go-version: ['stable', 'oldstable']
     steps:
     - uses: actions/checkout@v3
-    # Trigger rebuilding frontends, should be the same as committed.
-    - uses: actions/setup-node@v3
-      with:
-        node-version: 16
-        cache: 'npm'
-    - run: 'touch */*.ts'
     - uses: actions/setup-go@v4
       with:
         go-version: ${{ matrix.go-version }}
     - run: make build
     # Need to run tests with a temp dir on same file system for os.Rename to succeed.
     - run: 'mkdir -p tmp && TMPDIR=$PWD/tmp make test'
-    - uses: actions/upload-artifact@v4
+    - uses: actions/upload-artifact@v3
       with:
-        name: coverage-${{ matrix.go-version }}
         path: cover.html
-    # Format code, we check below if nothing changed.
-    - run: 'make fmt'
-    # Enforce the steps above didn't make any changes.
-    - run: git diff --exit-code

.gitignore (24 changed lines)

@@ -1,28 +1,32 @@
 /mox
-/mox.exe
 /rfc/[0-9][0-9]*
-/rfc/xr/
 /local/
 /testdata/check/
-/testdata/*/data/
-/testdata/ctl/config/dkim/
 /testdata/empty/
 /testdata/exportmaildir/
 /testdata/exportmbox/
+/testdata/httpaccount/data/
 /testdata/imap/data/
+/testdata/imapserverfuzz/data/
+/testdata/imaptest/data/
+/testdata/integration/data/
 /testdata/junk/*.bloom
 /testdata/junk/*.db
+/testdata/queue/data/
 /testdata/sent/
+/testdata/smtp/data/
 /testdata/smtp/datajunk/
+/testdata/smtp/sendlimit/data/
+/testdata/smtp/catchall/data/
 /testdata/smtp/postmaster/
+/testdata/smtpserverfuzz/data/
+/testdata/store/data/
 /testdata/train/
-/testdata/upgradetest.mbox.gz
-/testdata/integration/example-integration.zone
-/testdata/integration/tmp-pebble-ca.pem
+/testdata/quickstart/example-quickstart.zone
+/testdata/quickstart/tmp-pebble-ca.pem
 /cover.out
 /cover.html
 /.go/
 /node_modules/
-/upgrade*-verifydata.*.pprof
-/upgrade*-openaccounts.*.pprof
-/website/html/
+/package.json
+/package-lock.json

Makefile (200 changed lines)

@@ -1,182 +1,104 @@
 default: build
-build: build0 frontend build1
-build0:
+build:
 	# build early to catch syntax errors
 	CGO_ENABLED=0 go build
-	CGO_ENABLED=0 go vet ./...
-	CGO_ENABLED=0 go vet -tags quickstart quickstart_test.go
+	CGO_ENABLED=0 go vet -tags quickstart ./...
 	./gendoc.sh
-	./genapidoc.sh
-	./gents.sh webadmin/api.json webadmin/api.ts
-	./gents.sh webaccount/api.json webaccount/api.ts
-	./gents.sh webmail/api.json webmail/api.ts
-build1:
-	# build again, api json files above are embedded and new frontend code generated
+	(cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Admin) >http/adminapi.json
+	(cd http && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >http/accountapi.json
+	# build again, files above are embedded
 	CGO_ENABLED=0 go build
-install: build0 frontend
-	CGO_ENABLED=0 go install
-race: build0
-	go build -race
 test:
-	CGO_ENABLED=0 go test -fullpath -shuffle=on -coverprofile cover.out ./...
+	CGO_ENABLED=0 go test -shuffle=on -coverprofile cover.out ./...
 	go tool cover -html=cover.out -o cover.html
 test-race:
-	CGO_ENABLED=1 go test -fullpath -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
+	CGO_ENABLED=1 go test -race -shuffle=on -covermode atomic -coverprofile cover.out ./...
 	go tool cover -html=cover.out -o cover.html
-test-more:
-	TZ= CGO_ENABLED=0 go test -fullpath -shuffle=on -count 2 ./...
-# note: if testdata/upgradetest.mbox.gz exists, its messages will be imported
-# during tests. helpful for performance/resource consumption tests.
-test-upgrade: build
-	nice ./test-upgrade.sh
+test-upgrade:
+	./test-upgrade.sh
-# needed for "check" target
-install-staticcheck:
-	CGO_ENABLED=0 go install honnef.co/go/tools/cmd/staticcheck@latest
-install-ineffassign:
-	CGO_ENABLED=0 go install github.com/gordonklaus/ineffassign@v0.1.0
 check:
-	CGO_ENABLED=0 go vet -tags integration
-	CGO_ENABLED=0 go vet -tags website website/website.go
-	CGO_ENABLED=0 go vet -tags link rfc/link.go
-	CGO_ENABLED=0 go vet -tags errata rfc/errata.go
-	CGO_ENABLED=0 go vet -tags xr rfc/xr.go
-	GOARCH=386 CGO_ENABLED=0 go vet ./...
-	CGO_ENABLED=0 ineffassign ./...
-	CGO_ENABLED=0 staticcheck ./...
-	CGO_ENABLED=0 staticcheck -tags integration
-	CGO_ENABLED=0 staticcheck -tags website website/website.go
-	CGO_ENABLED=0 staticcheck -tags link rfc/link.go
-	CGO_ENABLED=0 staticcheck -tags errata rfc/errata.go
-	CGO_ENABLED=0 staticcheck -tags xr rfc/xr.go
+	staticcheck ./...
+	staticcheck -tags integration
+	staticcheck -tags quickstart
+	GOARCH=386 CGO_ENABLED=0 go vet -tags integration ./...
-# needed for check-shadow
-install-shadow:
-	CGO_ENABLED=0 go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
 # having "err" shadowed is common, best to not have others
 check-shadow:
-	CGO_ENABLED=0 go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
-	CGO_ENABLED=0 go vet -tags integration -vettool=$$(which shadow) 2>&1 | grep -v '"err"'
-	CGO_ENABLED=0 go vet -tags website -vettool=$$(which shadow) website/website.go 2>&1 | grep -v '"err"'
-	CGO_ENABLED=0 go vet -tags link -vettool=$$(which shadow) rfc/link.go 2>&1 | grep -v '"err"'
-	CGO_ENABLED=0 go vet -tags errata -vettool=$$(which shadow) rfc/errata.go 2>&1 | grep -v '"err"'
-	CGO_ENABLED=0 go vet -tags xr -vettool=$$(which shadow) rfc/xr.go 2>&1 | grep -v '"err"'
+	go vet -vettool=$$(which shadow) ./... 2>&1 | grep -v '"err"'
 fuzz:
-	go test -fullpath -fuzz FuzzParseSignature -fuzztime 5m ./dkim
-	go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./dkim
-	go test -fullpath -fuzz . -fuzztime 5m ./dmarc
-	go test -fullpath -fuzz . -fuzztime 5m ./dmarcrpt
-	go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./imapserver
-	go test -fullpath -fuzz . -fuzztime 5m ./imapclient
-	go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./junk
-	go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
-	go test -fullpath -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
-	go test -fullpath -fuzz . -fuzztime 5m ./smtp
-	go test -fullpath -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
-	go test -fullpath -fuzz . -fuzztime 5m ./spf
-	go test -fullpath -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
-	go test -fullpath -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
+	go test -fuzz FuzzParseSignature -fuzztime 5m ./dkim
+	go test -fuzz FuzzParseRecord -fuzztime 5m ./dkim
+	go test -fuzz . -fuzztime 5m ./dmarc
+	go test -fuzz . -fuzztime 5m ./dmarcrpt
+	go test -fuzz . -parallel 1 -fuzztime 5m ./imapserver
+	go test -fuzz . -parallel 1 -fuzztime 5m ./junk
+	go test -fuzz FuzzParseRecord -fuzztime 5m ./mtasts
+	go test -fuzz FuzzParsePolicy -fuzztime 5m ./mtasts
+	go test -fuzz . -parallel 1 -fuzztime 5m ./smtpserver
+	go test -fuzz . -fuzztime 5m ./spf
+	go test -fuzz FuzzParseRecord -fuzztime 5m ./tlsrpt
+	go test -fuzz FuzzParseMessage -fuzztime 5m ./tlsrpt
-govendor:
-	go mod tidy
-	go mod vendor
-	./genlicenses.sh
 test-integration:
-	-docker compose -f docker-compose-integration.yml kill
-	-docker compose -f docker-compose-integration.yml down
-	docker image build --pull --no-cache -f Dockerfile -t mox_integration_moxmail .
-	docker image build --pull --no-cache -f testdata/integration/Dockerfile.test -t mox_integration_test testdata/integration
-	-rm -rf testdata/integration/moxacmepebble/data
-	-rm -rf testdata/integration/moxmail2/data
-	-rm -f testdata/integration/tmp-pebble-ca.pem
-	MOX_UID=$$(id -u) docker compose -f docker-compose-integration.yml run test
-	docker compose -f docker-compose-integration.yml kill
+	docker-compose -f docker-compose-integration.yml build --no-cache --pull moxmail
+	-rm -r testdata/integration/data
+	docker-compose -f docker-compose-integration.yml run moxmail sh -c 'CGO_ENABLED=0 go test -tags integration'
+	docker-compose -f docker-compose-integration.yml down
+# like test-integration, but in separate steps
+integration-build:
+	docker-compose -f docker-compose-integration.yml build --no-cache --pull moxmail
+integration-start:
+	-rm -r testdata/integration/data
+	-docker-compose -f docker-compose-integration.yml run moxmail /bin/bash
+	docker-compose -f docker-compose-integration.yml down
+# run from within "make integration-start"
+integration-test:
+	CGO_ENABLED=0 go test -tags integration
+test-quickstart:
+	docker image build --pull -f Dockerfile -t mox_quickstart_moxmail .
+	docker image build --pull -f testdata/quickstart/Dockerfile.test -t mox_quickstart_test testdata/quickstart
+	-rm -rf testdata/quickstart/moxacmepebble/data
+	-rm -rf testdata/quickstart/moxmail2/data
+	-rm -f testdata/quickstart/tmp-pebble-ca.pem
+	MOX_UID=$$(id -u) docker-compose -f docker-compose-quickstart.yml run test
+	docker-compose -f docker-compose-quickstart.yml down --timeout 1
 imaptest-build:
-	-docker compose -f docker-compose-imaptest.yml build --no-cache --pull mox
+	-docker-compose -f docker-compose-imaptest.yml build --no-cache --pull mox
 imaptest-run:
 	-rm -r testdata/imaptest/data
 	mkdir testdata/imaptest/data
-	docker compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
-	docker compose -f docker-compose-imaptest.yml down
+	docker-compose -f docker-compose-imaptest.yml run --entrypoint /usr/local/bin/imaptest imaptest host=mox port=1143 user=mjl@mox.example pass=testtest mbox=imaptest.mbox
+	docker-compose -f docker-compose-imaptest.yml down
 fmt:
 	go fmt ./...
 	gofmt -w -s *.go */*.go
-tswatch:
-	bash -c 'while true; do inotifywait -q -e close_write *.ts webadmin/*.ts webaccount/*.ts webmail/*.ts; make frontend; done'
-node_modules/.bin/tsc:
-	-mkdir -p node_modules/.bin
-	npm ci --ignore-scripts
-install-js: node_modules/.bin/tsc
-install-js0:
-	-mkdir -p node_modules/.bin
-	npm install --ignore-scripts --save-dev --save-exact typescript@5.1.6
-webmail/webmail.js: lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
-	./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/webmail.ts
-webmail/msg.js: lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
-	./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/msg.ts
-webmail/text.js: lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
-	./tsc.sh $@ lib.ts webmail/api.ts webmail/lib.ts webmail/text.ts
-webadmin/admin.js: lib.ts webadmin/api.ts webadmin/admin.ts
-	./tsc.sh $@ lib.ts webadmin/api.ts webadmin/admin.ts
-webaccount/account.js: lib.ts webaccount/api.ts webaccount/account.ts
-	./tsc.sh $@ lib.ts webaccount/api.ts webaccount/account.ts
-frontend: node_modules/.bin/tsc webadmin/admin.js webaccount/account.js webmail/webmail.js webmail/msg.js webmail/text.js
-install-apidiff:
-	CGO_ENABLED=0 go install golang.org/x/exp/cmd/apidiff@v0.0.0-20231206192017-f3f8817b8deb
-genapidiff:
-	./apidiff.sh
+jswatch:
+	inotifywait -m -e close_write http/admin.html http/account.html | xargs -n2 sh -c 'echo changed; ./checkhtmljs http/admin.html http/account.html'
+jsinstall:
+	-mkdir -p node_modules/.bin
+	npm install jshint@2.13.2
 docker:
 	docker build -t mox:dev .
 docker-release:
 	./docker-release.sh
-genwebsite:
-	./genwebsite.sh
-buildall:
-	CGO_ENABLED=0 GOOS=linux GOARCH=arm go build
-	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build
-	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=linux GOARCH=386 go build
-	CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=netbsd GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=dragonfly GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=illumos GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=solaris GOARCH=amd64 go build
-	CGO_ENABLED=0 GOOS=aix GOARCH=ppc64 go build
-	CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build
-	# no plan9 for now

README.md (523 changed lines)

@@ -1,48 +1,102 @@
 Mox is a modern full-featured open source secure mail server for low-maintenance self-hosted email.
-For more details, see the mox website, https://www.xmox.nl.
 See Quickstart below to get started.
 ## Features
 - Quick and easy to start/maintain mail server, for your own domain(s).
-- SMTP (with extensions) for receiving, submitting and delivering email.
+- SMTP (with extensions) for receiving and submitting email.
 - IMAP4 (with extensions) for giving email clients access to email.
-- Webmail for reading/sending email from the browser.
-- SPF/DKIM/DMARC for authenticating messages/delivery, also DMARC aggregate
-  reports.
-- Reputation tracking, learning (per user) host-, domain- and
-  sender address-based reputation from (Non-)Junk email classification.
+- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
+- SPF, verifying that a remote host is allowed to sent email for a domain.
+- DKIM, verifying that a message is signed by the claimed sender domain,
+  and for signing emails sent by mox for others to verify.
+- DMARC, for enforcing SPF/DKIM policies set by domains. Incoming DMARC
+  aggregate reports are analyzed.
+- Reputation tracking, learning (per user) host- and domain-based reputation from
+  (Non-)Junk email.
 - Bayesian spam filtering that learns (per user) from (Non-)Junk email.
 - Slowing down senders with no/low reputation or questionable email content
   (similar to greylisting). Rejected emails are stored in a mailbox called Rejects
   for a short period, helping with misclassified legitimate synchronous
   signup/login/transactional emails.
-- Internationalized email (EIA), with unicode in email address usernames
-  ("localparts"), and in domain names (IDNA).
-- Automatic TLS with ACME, for use with Let's Encrypt and other CA's.
-- DANE and MTA-STS for inbound and outbound delivery over SMTP with STARTTLS,
-  including REQUIRETLS and with incoming/outgoing TLSRPT reporting.
-- Web admin interface that helps you set up your domains, accounts and list
-  aliases (instructions to create DNS records, configure
-  SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, and modifying the
-  configuration file.
-- Account autodiscovery (with SRV records, Microsoft-style, Thunderbird-style,
-  and Apple device management profiles) for easy account setup (though client
-  support is limited).
+- Internationalized email, with unicode names in domains and usernames
+  ("localparts").
+- TLSRPT, parsing reports about TLS usage and issues.
+- MTA-STS, for ensuring TLS is used whenever it is required. Both serving of
+  policies, and tracking and applying policies of remote servers.
+- Web admin interface that helps you set up your domains and accounts
+  (instructions to create DNS records, configure
+  SPF/DKIM/DMARC/TLSRPT/MTA-STS), for status information, managing
+  accounts/domains, and modifying the configuration file.
+- Autodiscovery (with SRV records, Microsoft-style and Thunderbird-style) for
+  easy account setup (though not many clients support it).
 - Webserver with serving static files and forwarding requests (reverse
   proxy), so port 443 can also be used to serve websites.
-- Simple HTTP/JSON API for sending transaction email and receiving delivery
-  events and incoming messages (webapi and webhooks).
 - Prometheus metrics and structured logging for operational insight.
-- "mox localserve" subcommand for running mox locally for email-related
+- "localserve" subcommand for running mox locally for email-related
   testing/developing, including pedantic mode.
-- Most non-server Go packages mox consists of are written to be reusable.
 Mox is available under the MIT-license and was created by Mechiel Lukkien,
-mechiel@ueber.net. Mox includes BSD-3-claused code from the Go Authors, and the
-Public Suffix List by Mozilla under Mozilla Public License, v2.0.
+mechiel@ueber.net. Mox includes the Public Suffix List by Mozilla, under Mozilla
+Public License, v2.0.
# Download
You can easily (cross) compile mox if you have a recent Go toolchain installed
(see "go version", it must be >= 1.19; otherwise, see https://go.dev/dl/ or
https://go.dev/doc/manage-install and $HOME/go/bin):
GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest
Or you can download a binary built with the latest Go toolchain from
https://beta.gobuilds.org/github.com/mjl-/mox, and symlink or rename it to
"mox".
Verify you have a working mox binary:
./mox version
Note: Mox only compiles for/works on unix systems, not on Plan 9 or Windows.
You can also run mox with docker image `r.xmox.nl/mox`, with tags like `v0.0.1`
and `v0.0.1-go1.20.1-alpine3.17.2`, see https://r.xmox.nl/repo/mox/. See
docker-compose.yml in this repository for instructions on starting. You must run
docker with host networking, because mox needs to find your actual public IP's
and get the remote IPs for incoming connections, not a local/internal NAT IP.
# Quickstart
The easiest way to get started with serving email for your domain is to get a
vm/machine dedicated to serving email, name it [host].[domain] (e.g.
mail.example.com), login as root, and run:
# Create mox user and homedir (or pick another name or homedir):
useradd -m -d /home/mox mox
cd /home/mox
... compile or download mox to this directory, see above ...
# Generate config files for your address/domain:
./mox quickstart you@example.com
The quickstart creates an account, generates a password and configuration
files, prints the DNS records you need to manually create and prints commands
to start mox and optionally install mox as a service.
A dedicated machine is highly recommended because modern email requires HTTPS,
and mox currently needs it for automatic TLS. You could combine mox with an
existing webserver, but it requires more configuration. If you want to serve
websites on the same machine, consider using the webserver built into mox. If
you want to run an existing webserver on port 443/80, see "mox help quickstart",
it'll tell you to run "./mox quickstart -existing-webserver you@example.com".
After starting, you can access the admin web interface on internal IPs.
# Future/development
 Mox has automated tests, including for interoperability with Postfix for SMTP.
 Mox is manually tested with email clients: Mozilla Thunderbird, mutt, iOS Mail,
@@ -52,138 +106,40 @@ proton.me.
 The code is heavily cross-referenced with the RFCs for readability/maintainability.
-# Quickstart
+## Roadmap
The easiest way to get started with serving email for your domain is to get a
(virtual) machine dedicated to serving email, name it `[host].[domain]` (e.g.
mail.example.com). Having a DNSSEC-verifying resolver installed, such as
unbound, is highly recommended. Run as root:
# Create mox user and homedir (or pick another name or homedir):
useradd -m -d /home/mox mox
cd /home/mox
... compile or download mox to this directory, see below ...
# Generate config files for your address/domain:
./mox quickstart you@example.com
The quickstart:
- Creates configuration files mox.conf and domains.conf.
- Adds the domain and an account for the email address to domains.conf
- Generates an admin and account password.
- Prints the DNS records you need to add, for the machine and domain.
- Prints commands to start mox, and optionally install mox as a service.
A machine that doesn't already run a webserver is highly recommended because
modern email requires HTTPS, and mox currently needs to run a webserver for
automatic TLS with ACME. You could combine mox with an existing webserver, but
it requires a lot more configuration. If you want to serve websites on the same
machine, consider using the webserver built into mox. It's pretty good! If you
want to run an existing webserver on port 443/80, see `mox help quickstart`.
After starting, you can access the admin web interface on internal IPs.
# Download
Download a mox binary from
https://beta.gobuilds.org/github.com/mjl-/mox@latest/linux-amd64-latest/.
Symlink or rename it to "mox".
The URL above always resolves to the latest release for linux/amd64 built with
the latest Go toolchain. See the links at the bottom of that page for binaries
for other platforms.
# Compiling
You can easily (cross) compile mox yourself. You need a recent Go toolchain
installed. Run `go version`, it must be >= 1.23. Download the latest version
from https://go.dev/dl/ or see https://go.dev/doc/manage-install.
To download the source code of the latest release, and compile it to binary "mox":
GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest
Mox only compiles for and fully works on unix systems. Mox also compiles for
Windows, but "mox serve" does not yet work, though "mox localserve" (for a
local test instance) and most other subcommands do. Mox does not compile for
Plan 9.
# Docker
Although not recommended, you can also run mox with docker image
`r.xmox.nl/mox`, with tags like `v0.0.1` and `v0.0.1-go1.20.1-alpine3.17.2`, see
https://r.xmox.nl/r/mox/. See
https://github.com/mjl-/mox/blob/main/docker-compose.yml to get started.
New docker images aren't (automatically) generated for new Go runtime/compile
releases.
It is important to run with docker host networking, so mox can use the public
IPs and has correct remote IP information for incoming connections (important
for junk filtering and rate-limiting).
# Development
See develop.txt for instructions/tips for developing on mox.
# Sponsors
Thanks to NLnet foundation, the European Commission's NGI programme, and the
Netherlands Ministry of the Interior and Kingdom Relations for financial
support:
- 2024/2025, NLnet NGI0 Zero Core, https://nlnet.nl/project/Mox-Automation/
- 2024, NLnet e-Commons Fund, https://nlnet.nl/project/Mox-API/
- 2023/2024, NLnet NGI0 Entrust, https://nlnet.nl/project/Mox/
# Roadmap
- "mox setup" command, using admin web interface for interactive setup
- Automate DNS management, for setup and maintenance, such as DANE/DKIM key rotation
- Config options for "transactional email domains", for which mox will only
send messages
- Encrypted storage of files (email messages, TLS keys), also with per account keys
- Recognize common deliverability issues and help postmasters solve them
- JMAP, IMAP OBJECTID extension, IMAP JMAPACCESS extension
- Calendaring with CalDAV/iCal
- Introbox, to which first-time senders are delivered
- Add special IMAP mailbox ("Queue?") that contains queued but
undelivered messages, updated with IMAP flags/keywords/tags and message headers.
- External addresses in aliases/lists.
- Autoresponder (out of office/vacation)
- Mailing list manager
- IMAP extensions for "online"/non-syncing/webmail clients (SORT (including
DISPLAYFROM, DISPLAYTO), THREAD, PARTIAL, CONTEXT=SEARCH CONTEXT=SORT ESORT,
FILTERS)
- IMAP ACL support, for account sharing (interacts with many extensions and code)
- Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP
CHUNKING and BINARYMIME, IMAP CATENATE
 - Privilege separation, isolating parts of the application to more restricted
-  sandbox (e.g. new unauthenticated connections)
-- Using mox as backup MX
+  sandbox (e.g. new unauthenticated connections).
+- DANE and DNSSEC.
+- Sending DMARC and TLS reports (currently only receiving).
+- OAUTH2 support, for single sign on.
+- Add special IMAP mailbox ("Queue?") that contains queued but
+  not-yet-delivered messages.
 - Sieve for filtering (for now see Rulesets in the account config)
-- ARC, with forwarded email from trusted source
-- Milter support, for integration with external tools
-- SMTP DSN extension
-- IMAP Sieve extension, to run Sieve scripts after message changes (not only
-  new deliveries)
-- OAUTH2 support, for single sign on
-- Forwarding (to an external address)
+- Calendaring
+- IMAP CONDSTORE and QRESYNC extensions
+- IMAP THREAD extension
+- Using mox as backup MX.
+- Old-style internationalization in messages.
+- JMAP
+- Webmail
 There are many smaller improvements to make as well, search for "todo" in the code.
-## Not supported/planned
-There is currently no plan to implement the following. Though this may
-change in the future.
-- Functioning as an SMTP relay without authentication
+## Not supported
+But perhaps in the future...
+- HTTP-based API for sending messages and receiving delivery feedback
+- Functioning as SMTP relay
+- Forwarding (to an external address)
+- Autoresponders
 - POP3
-- Delivery to (unix) OS system users (mbox/Maildir)
+- Delivery to (unix) OS system users
+- Mailing list manager
 - Support for pluggable delivery mechanisms
-- iOS Mail push notifications (with XAPPLEPUSHSERVICE undocumened imap
-  extension and hard to get APNS certificate)
 # FAQ - Frequently Asked Questions
@@ -191,26 +147,18 @@ change in the future.
 ## Why a new mail server implementation?
 Mox aims to make "running a mail server" easy and nearly effortless. Excellent
-quality (open source) mail server software exists, but getting a working setup
-typically requires you configure half a dozen services (SMTP, IMAP,
-SPF/DKIM/DMARC, spam filtering), which are often written in C (where small bugs
-often have large consequences). That seems to lead to people no longer running
-their own mail servers, instead switching to one of the few centralized email
-providers. Email with SMTP is a long-time decentralized messaging protocol. To
-keep it decentralized, people need to run their own mail server. Mox aims to
-make that easy.
+quality mail server software exists, but getting a working setup typically
+requires you configure half a dozen services (SMTP, IMAP, SPF/DKIM/DMARC, spam
+filtering). That seems to lead to people no longer running their own mail
+servers, instead switching to one of the few centralized email providers. Email
+with SMTP is a long-time decentralized messaging protocol. To keep it
+decentralized, people need to run their own mail server. Mox aims to make that
+easy.
 ## Where is the documentation?
-To keep mox as a project maintainable, documentation is integrated into, and
-generated from the code.
-A list of mox commands, and their help output, are at
-https://www.xmox.nl/commands/.
-Mox is configured through configuration files, and each field comes with
-documentation. See https://www.xmox.nl/config/ for config files containing all
-fields and their documentation.
+See all commands and help text at https://pkg.go.dev/github.com/mjl-/mox/, and
+example config files at https://pkg.go.dev/github.com/mjl-/mox/config/.
 You can get the same information by running "mox" without arguments to list its
 subcommands and usage, and "mox help [subcommand]" for more details.
@@ -218,44 +166,9 @@ subcommands and usage, and "mox help [subcommand]" for more details.
 The example config files are printed by "mox config describe-static" and "mox
 config describe-dynamic".
-If you're missing some documentation, please create an issue describing what is
-unclear or confusing, and we'll try to improve the documentation.
+Mox is still in early stages, and documentation is still limited. Please create
+an issue describing what is unclear or confusing, and we'll try to improve the
+documentation.
## Is Mox affected by SMTP smuggling?
Mox itself is not affected: it only treats "\r\n.\r\n" as SMTP end-of-message.
But read on for caveats.
SMTP smuggling exploits differences in handling by SMTP servers of: carriage
returns (CR, or "\r"), newlines (line feeds, LF, "\n") in the context of "dot
stuffing". SMTP is a text-based protocol. An SMTP transaction to send a
message is finalized with a "\r\n.\r\n" sequence. This sequence could occur in
the message being transferred, so any verbatim "." at the start of a line in a
message is "escaped" with another dot ("dot stuffing"), to not trigger the SMTP
end-of-message. SMTP smuggling takes advantage of bugs in some mail servers
that interpret other sequences than "\r\n.\r\n" as SMTP end-of-message. For
example "\n.\n" or even "\r.\r", and perhaps even other magic character
combinations.
Before v0.0.9, mox accepted SMTP transactions with bare carriage returns
(without newline) for compatibility with real-world email messages, considering
them meaningless and therefore innocuous.
Since v0.0.9, SMTP transactions with bare carriage returns are rejected.
Sending messages with bare carriage returns to buggy mail servers can cause
those mail servers to materialize non-existent messages. Now that mox rejects
messages with bare carriage returns, sending a message through mox can no
longer be used to trigger those bugs.
Mox can still handle bare carriage returns in email messages, e.g. those
imported from mbox files or Maildirs, or from messages added over IMAP. Mox
still fixes up messages with bare newlines by adding the missing carriage
returns.
Before v0.0.9, an SMTP transaction for a message containing "\n.\n" would
result in a non-specific error message, and "\r\n.\n" would result in the dot
being dropped. Since v0.0.9, these sequences are rejected with a message
mentioning SMTP smuggling.
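To make the dot-stuffing mechanics concrete, here is a minimal sketch using Go's standard net/textproto package (an illustration only, not mox's own SMTP code): a leading "." on a line is doubled on the wire, and closing the writer emits the final "." line that completes the "\r\n.\r\n" end-of-message sequence.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net/textproto"
)

func main() {
	var buf bytes.Buffer
	w := textproto.NewWriter(bufio.NewWriter(&buf))

	// DotWriter performs dot stuffing while writing the message body.
	dw := w.DotWriter()
	fmt.Fprint(dw, "Subject: test\r\n\r\n.a line starting with a dot\r\n")
	dw.Close() // writes the closing "." line and flushes

	// Prints: "Subject: test\r\n\r\n..a line starting with a dot\r\n.\r\n"
	fmt.Printf("%q\n", buf.String())
}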
## How do I import/export email? ## How do I import/export email?
@@ -267,10 +180,6 @@ and copy or move messages from one account to the other.
 Similarly, see the export functionality on the accounts web page and the "mox
 export maildir" and "mox export mbox" subcommands to export email.
-Importing large mailboxes may require a lot of memory (a limitation of the
-current database). Splitting up mailboxes in smaller parts (e.g. 100k messages)
-would help.
 ## How can I help?
 Mox needs users and testing in real-life setups! So just give it a try, send
@@ -286,33 +195,31 @@ compatibility issues, limitations, anti-spam measures, specification
 violations, that would be interesting to hear about.
 Pull requests for bug fixes and new code are welcome too. If the changes are
-large, it helps to start a discussion (create an "issue") before doing all the
-work. In practice, starting with a small contribution and growing from there has
-the highest chance of success.
+large, it helps to start a discussion (create a ticket) before doing all the
+work.
 By contributing (e.g. code), you agree your contributions are licensed under the
 MIT license (like mox), and have the rights to do so.
 ## Where can I discuss mox?
-Join #mox on irc.oftc.net, or #mox:matrix.org (https://matrix.to/#/#mox:matrix.org),
-or #mox on the "Gopher slack".
+Join #mox on irc.oftc.net, or #mox on the "Gopher slack".
 For bug reports, please file an issue at https://github.com/mjl-/mox/issues/new.
 ## How do I change my password?
 Regular users (doing IMAP/SMTP with authentication) can change their password
-at the account page, e.g. `http://localhost/`. Or you can set a password with "mox
+at the account page, e.g. http://localhost/. Or you can set a password with "mox
 setaccountpassword".
 The admin can change the password of any account through the admin page, at
-`http://localhost/admin/` by default (leave username empty when logging in).
-The account and admin pages are served on localhost for configs created with
-the quickstart. To access these from your browser, run
+http://localhost/admin/ by default (leave username empty when logging in).
+The account and admin pages are served on localhost on your mail server.
+To access these from your browser, run
 `ssh -L 8080:localhost:80 you@yourmachine` locally and open
-`http://localhost:8080/[...]`.
+http://localhost:8080/[...].
 The admin password can be changed with "mox setadminpassword".
@@ -321,13 +228,8 @@ The admin password can be changed with "mox setadminpassword".
 Unfortunately, mox does not yet provide an option for that. Mox does spam
 filtering based on reputation of received messages. It will take a good amount
 of work to share that information with a backup MX. Without that information,
-spammers could use a backup MX to get their spam accepted.
-Until mox has a proper solution, you can simply run a single SMTP server. The
-author has run a single mail server for over a decade without issues. Machines
-and network connectivity are stable nowadays, and email delivery will be
-retried for many hours during temporary errors (e.g. when rebooting a machine
-after updates).
+spammers could use a backup MX to get their spam accepted. Until mox has a
+proper solution, you can simply run a single SMTP server.
 ## How do I stay up to date?
@@ -344,7 +246,7 @@ You can also monitor newly added releases on this repository with the github
 (https://github.com/mjl-/mox/tags.atom) or releases
 (https://github.com/mjl-/mox/releases.atom), or monitor the docker images.
-Keep in mind you have a responsibility to keep the internet-connected software
+Keep in mind you have a responsibility to keep the internect-connected software
 you run up to date and secure.
 ## How do I upgrade my mox installation?
@@ -354,24 +256,20 @@ in place and restart. If manual actions are required, the release notes mention
 them. Check the release notes of all version between your current installation
 and the release you're upgrading to.
-Before upgrading, make a backup of the config & data directory with `mox backup
-<destdir>`. This copies all files from the config directory to
-`<destdir>/config`, and creates `<destdir>/data` with a consistent snapshots of
-the database files, and message files from the outgoing queue and accounts.
-Using the new mox binary, run `mox verifydata <destdir>/data` (do NOT use the
-"live" data directory!) for a dry run. If this fails, an upgrade will probably
-fail too.
+Before upgrading, make a backup of the data directory with `mox backup
+<destdir>`. This writes consistent snapshots of the database files, and
+duplicates message files from the queue and accounts. Using the new mox
+binary, run `mox verifydata <backupdir>` (do NOT use the "live" data
+directory!) for a dry run. If this fails, an upgrade will probably fail too.
 Important: verifydata with the new mox binary can modify the database files
 (due to automatic schema upgrades). So make a fresh backup again before the
 actual upgrade. See the help output of the "backup" and "verifydata" commands
 for more details.
-During backup, message files are hardlinked if possible, and copied otherwise.
-Using a destination directory like `data/tmp/backup` increases the odds
-hardlinking succeeds: the default mox systemd service file mounts
-the data directory separately, so hardlinks to outside the data directory are
-cross-device and will fail.
+During backup, message files are hardlinked if possible. Using a destination
+directory like `data/tmp/backup` increases the odds hardlinking succeeds: the
+default systemd service file specifically mounts the data directory, causing
+attempts to outside it to fail with an error about cross-device linking.
 If an upgrade fails and you have to restore (parts) of the data directory, you
 should run `mox verifydata <datadir>` (with the original binary) on the
@@ -405,148 +303,15 @@ should account for the size of the email messages (no compression currently),
 an additional 15% overhead for the meta data, and add some more headroom.
 Expand as necessary.
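For example, under these assumptions a mail store expected to hold 20 GB of raw messages would need roughly 20 GB × 1.15 ≈ 23 GB including metadata, so provisioning around 30 GB leaves reasonable headroom (the 20 GB figure is purely illustrative).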
-## Won't the big email providers block my email?
-It is a common misconception that it is impossible to run your own email server
-nowadays. The claim is that the handful big email providers will simply block
-your email. However, you can run your own email server just fine, and your
-email will be accepted, provided you are doing it right.
-If your email is rejected, it is often because your IP address has a bad email
-sending reputation. Email servers often use IP blocklists to reject email
-networks with a bad email sending reputation. These blocklists often work at
-the level of whole network ranges. So if you try to run an email server from a
-hosting provider with a bad reputation (which happens if they don't monitor
-their network or don't act on abuse/spam reports), your IP too will have a bad
-reputation and other mail servers (both large and small) may reject messages
-coming from you. During the quickstart, mox checks if your IPs are on a few
-often-used blocklists. It's typically not a good idea to host an email server
-on the cheapest or largest cloud providers: They often don't spend the
-resources necessary for a good reputation, or they simply block all outgoing
-SMTP traffic. It's better to look for a technically-focused local provider.
-They too may initially block outgoing SMTP connections on new machines to
-prevent spam from their networks. But they will either automatically open up
-outgoing SMTP traffic after a cool down period (e.g. 24 hours), or after you've
-contacted their support.
-After you get past the IP blocklist checks, email servers use many more signals
-to determine if your email message could be spam and should be rejected. Mox
-helps you set up a system that doesn't trigger most of the technical signals
-(e.g. with SPF/DKIM/DMARC). But there are more signals, for example: Sending to
-a mail server or address for the first time. Sending from a newly registered
-domain (especially if you're sending automated messages, and if you send more
-messages after previous messages were rejected), domains that existed for a few
-weeks to a month are treated more friendly. Sending messages with content that
-resembles known spam messages.
-Should your email be rejected, you will typically get an error message during
-the SMTP transaction that explains why. In the case of big email providers the
-error message often has instructions on how to prove to them you are a
-legitimate sender.
+## Can I see some screenshots?
+Yes, see https://www.xmox.nl/screenshots/.
+Mox has an "account" web interface where users can view their account and
+manage their address configuration, such as rules for automatically delivering
+certain incoming messages to a specific mailbox.
+Mox also has an "admin" web interface where the mox instance administrator can
+make changes, e.g. add/remove/modify domains/accounts/addresses.
+Mox does not have a webmail yet, so there are no screenshots of actual email.
## Can mox deliver through a smarthost?
Yes, you can configure a "Transport" in mox.conf and configure "Routes" in
domains.conf to send some or all messages through the transport. A transport
can be an SMTP relay or authenticated submission, or making mox make outgoing
connections through a SOCKS proxy.
For an example, see https://www.xmox.nl/config/#hdr-example-transport. For
details about Transports and Routes, see
https://www.xmox.nl/config/#cfg-mox-conf-Transports and
https://www.xmox.nl/config/#cfg-domains-conf-Routes.
Remember to add the IP addresses of the transport to the SPF records of your
domains. Keep in mind some 3rd party submission servers may mishandle your
messages, for example by replacing your Message-Id header and thereby
invalidating your DKIM-signatures, or rejecting messages with more than one
DKIM-signature.
## Can I use mox to send transactional email?
Yes. While you can use SMTP submission to send messages you've composed
yourself, and monitor a mailbox for DSNs, a more convenient option is to use
the mox HTTP/JSON-based webapi and webhooks.
The mox webapi can be used to send outgoing messages that mox composes. The web
api can also be used to deal with messages stored in an account, like changing
message flags, retrieving messages in parsed form or individual parts of
multipart messages, or moving messages to another mailbox or deleting messages
altogether.
Mox webhooks can be used to receive updates about incoming and outgoing
deliveries. Mox can automatically manage per account suppression lists.
See https://www.xmox.nl/features/#hdr-webapi-and-webhooks for details.
## Can I use existing TLS certificates/keys?
Yes. The quickstart command creates a config that uses ACME with Let's Encrypt,
but you can change the config file to use existing certificate and key files.
You'll see "ACME: letsencrypt" in the "TLS" section of the "public" Listener.
Remove or comment out the ACME-line, and add a "KeyCerts" section, see
https://www.xmox.nl/config/#cfg-mox-conf-Listeners-x-TLS-KeyCerts
You can have multiple certificates and keys: The line with the "-" (dash) is
the start of a list item. Duplicate that line up to and including the line with
KeyFile for each certificate/key you have. Mox makes a TLS config that holds
all specified certificates/keys, and uses it for all services for that Listener
(including a webserver), choosing the correct certificate for incoming
requests.
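As an illustration only (paths are placeholders; see the config reference linked above for the authoritative field list), the TLS section of the "public" Listener could then look roughly like:

TLS:
	KeyCerts:
		-
			CertFile: /path/to/mail.example.com-chain.pem
			KeyFile: /path/to/mail.example.com.key
		-
			CertFile: /path/to/mta-sts.example.com-chain.pem
			KeyFile: /path/to/mta-sts.example.com.key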
Keep in mind that for each email domain you host, you will need a certificate
for `mta-sts.<domain>`, `autoconfig.<domain>` and `mail.<domain>`, unless you
disable MTA-STS, autoconfig and the client-settings-domain for that domain.
Mox opens the key and certificate files during initial startup, as root (and
passes file descriptors to the unprivileged process). No special permissions
are needed on the key and certificate files.
## Can I directly access mailboxes through the file system?
No, mox only provides access to email through protocols like IMAP.
While it can be convenient for users/email clients to access email through
conventions like Maildir, providing such access puts quite a burden on the
server: The server has to continuously watch for changes made to the mail store
by external programs, and sync its internal state. By only providing access to
emails through mox, the storage/state management is simpler and easier to
implement reliably.
Not providing direct file system access also allows future improvements in the
storage mechanism. Such as encryption of all stored messages. Programs won't be
able to access such messages directly.
Mox stores metadata about delivered messages in its per-account message index
database, more than fits in a simple (filename-based) format like Maildir. The
IP address of the remote SMTP server during delivery, SPF/DKIM/DMARC domains
and validation status, and more...
For efficiency, mox doesn't prepend message headers generated during delivery
(e.g. Authentication-Results) to the on-disk message file, but only stores it
in the database. This prevents a rewrite of the entire message file. When
reading a message, mox combines the prepended headers from the database with
the message file.
Mox user accounts have no relation to operating system user accounts. Multiple
system users reading their email on a single machine is not very common
anymore. All data (for all accounts) stored by mox is accessible only by the
mox process. Messages are currently stored as individual files in standard
Internet Message Format (IMF), at `data/accounts/<account>/msg/<dir>/<msgid>`:
`msgid` is a consecutive unique integer id assigned by the per-account message
index database; `dir` groups 8k consecutive message ids into a directory,
ensuring they don't become too large. The message index database file for an
account is at `data/accounts/<account>/index.db`, accessed with the bstore
database library, which uses bbolt (formerly BoltDB) for storage, a
transactional key/value library/file format inspired by LMDB.
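As a purely hypothetical sketch of that layout (mox's actual path construction may differ in detail), the directory for a message follows from grouping 8k (8192) consecutive message ids:

package main

import "fmt"

// messagePath illustrates the storage layout described above: 8k consecutive
// message ids share one directory under the account's msg directory.
func messagePath(accountDir string, msgID int64) string {
	dir := msgID / 8192
	return fmt.Sprintf("%s/msg/%d/%d", accountDir, dir, msgID)
}

func main() {
	// Prints: data/accounts/mjl/msg/15/123456
	fmt.Println(messagePath("data/accounts/mjl", 123456))
}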
## How do I block IPs with authentication failures with fail2ban?
Mox includes a rate limiter for IPs/networks that cause too many authentication
failures. It automatically unblocks such IPs/networks after a while. So you may
not need fail2ban. If you want to use fail2ban, you could use this snippet:
[Definition]
failregex = .*failed authentication attempt.*remote=<HOST>
ignoreregex =
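The snippet above only defines how to match failed logins; a hypothetical jail.local entry to go with it (assuming the filter is saved as /etc/fail2ban/filter.d/mox.conf and that mox output ends up in the systemd journal) could look like:

[mox]
enabled = true
filter = mox
backend = systemd
maxretry = 10
findtime = 10m
bantime = 1h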

File diff suppressed because it is too large.


@@ -1,175 +0,0 @@
package admin
import (
"fmt"
"maps"
"slices"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mox-"
)
type TLSMode uint8
const (
TLSModeImmediate TLSMode = 0
TLSModeSTARTTLS TLSMode = 1
TLSModeNone TLSMode = 2
)
type ProtocolConfig struct {
Host dns.Domain
Port int
TLSMode TLSMode
EnabledOnHTTPS bool
}
type ClientConfig struct {
IMAP ProtocolConfig
Submission ProtocolConfig
}
// ClientConfigDomain returns a single IMAP and Submission client configuration for
// a domain.
func ClientConfigDomain(d dns.Domain) (rconfig ClientConfig, rerr error) {
var haveIMAP, haveSubmission bool
domConf, ok := mox.Conf.Domain(d)
if !ok {
return ClientConfig{}, fmt.Errorf("%w: unknown domain", ErrRequest)
}
gather := func(l config.Listener) (done bool) {
host := mox.Conf.Static.HostnameDomain
if l.Hostname != "" {
host = l.HostnameDomain
}
if domConf.ClientSettingsDomain != "" {
host = domConf.ClientSettingsDNSDomain
}
if !haveIMAP && l.IMAPS.Enabled {
rconfig.IMAP.Host = host
rconfig.IMAP.Port = config.Port(l.IMAPS.Port, 993)
rconfig.IMAP.TLSMode = TLSModeImmediate
rconfig.IMAP.EnabledOnHTTPS = l.IMAPS.EnabledOnHTTPS
haveIMAP = true
}
if !haveIMAP && l.IMAP.Enabled {
rconfig.IMAP.Host = host
rconfig.IMAP.Port = config.Port(l.IMAP.Port, 143)
rconfig.IMAP.TLSMode = TLSModeSTARTTLS
if l.TLS == nil {
rconfig.IMAP.TLSMode = TLSModeNone
}
haveIMAP = true
}
if !haveSubmission && l.Submissions.Enabled {
rconfig.Submission.Host = host
rconfig.Submission.Port = config.Port(l.Submissions.Port, 465)
rconfig.Submission.TLSMode = TLSModeImmediate
rconfig.Submission.EnabledOnHTTPS = l.Submissions.EnabledOnHTTPS
haveSubmission = true
}
if !haveSubmission && l.Submission.Enabled {
rconfig.Submission.Host = host
rconfig.Submission.Port = config.Port(l.Submission.Port, 587)
rconfig.Submission.TLSMode = TLSModeSTARTTLS
if l.TLS == nil {
rconfig.Submission.TLSMode = TLSModeNone
}
haveSubmission = true
}
return haveIMAP && haveSubmission
}
// Look at the public listener first. Most likely the intended configuration.
if public, ok := mox.Conf.Static.Listeners["public"]; ok {
if gather(public) {
return
}
}
// Go through the other listeners in consistent order.
names := slices.Sorted(maps.Keys(mox.Conf.Static.Listeners))
for _, name := range names {
if gather(mox.Conf.Static.Listeners[name]) {
return
}
}
return ClientConfig{}, fmt.Errorf("%w: no listeners found for imap and/or submission", ErrRequest)
}
// ClientConfigs holds the client configuration for IMAP/Submission for a
// domain.
type ClientConfigs struct {
Entries []ClientConfigsEntry
}
type ClientConfigsEntry struct {
Protocol string
Host dns.Domain
Port int
Listener string
Note string
}
// ClientConfigsDomain returns the client configs for IMAP/Submission for a
// domain.
func ClientConfigsDomain(d dns.Domain) (ClientConfigs, error) {
domConf, ok := mox.Conf.Domain(d)
if !ok {
return ClientConfigs{}, fmt.Errorf("%w: unknown domain", ErrRequest)
}
c := ClientConfigs{}
c.Entries = []ClientConfigsEntry{}
var listeners []string
for name := range mox.Conf.Static.Listeners {
listeners = append(listeners, name)
}
slices.Sort(listeners)
note := func(tls bool, requiretls bool) string {
if !tls {
return "plain text, no STARTTLS configured"
}
if requiretls {
return "STARTTLS required"
}
return "STARTTLS optional"
}
for _, name := range listeners {
l := mox.Conf.Static.Listeners[name]
host := mox.Conf.Static.HostnameDomain
if l.Hostname != "" {
host = l.HostnameDomain
}
if domConf.ClientSettingsDomain != "" {
host = domConf.ClientSettingsDNSDomain
}
if l.Submissions.Enabled {
note := "with TLS"
if l.Submissions.EnabledOnHTTPS {
note += "; also served on port 443 with TLS ALPN \"smtp\""
}
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submissions.Port, 465), name, note})
}
if l.IMAPS.Enabled {
note := "with TLS"
if l.IMAPS.EnabledOnHTTPS {
note += "; also served on port 443 with TLS ALPN \"imap\""
}
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 993), name, note})
}
if l.Submission.Enabled {
c.Entries = append(c.Entries, ClientConfigsEntry{"Submission (SMTP)", host, config.Port(l.Submission.Port, 587), name, note(l.TLS != nil, !l.Submission.NoRequireSTARTTLS)})
}
if l.IMAP.Enabled {
c.Entries = append(c.Entries, ClientConfigsEntry{"IMAP", host, config.Port(l.IMAPS.Port, 143), name, note(l.TLS != nil, !l.IMAP.NoRequireSTARTTLS)})
}
}
return c, nil
}


@@ -1,318 +0,0 @@
package admin
import (
"crypto"
"crypto/ed25519"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"fmt"
"net/url"
"strings"
"github.com/mjl-/adns"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/dmarc"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/spf"
"github.com/mjl-/mox/tlsrpt"
"slices"
)
// todo: find a way to automatically create the dns records as it would greatly simplify setting up email for a domain. we could also dynamically make changes, e.g. providing grace periods after disabling a dkim key, only automatically removing the dkim dns key after a few days. but this requires some kind of api and authentication to the dns server. there doesn't appear to be a single commonly used api for dns management. each of the numerous cloud providers have their own APIs and rather large SKDs to use them. we don't want to link all of them in.
// DomainRecords returns text lines describing DNS records required for configuring
// a domain.
//
// If certIssuerDomainName is set, CAA records to limit TLS certificate issuance to
// that caID will be suggested. If acmeAccountURI is also set, CAA records also
// restricting issuance to that account ID will be suggested.
func DomainRecords(domConf config.Domain, domain dns.Domain, hasDNSSEC bool, certIssuerDomainName, acmeAccountURI string) ([]string, error) {
d := domain.ASCII
h := mox.Conf.Static.HostnameDomain.ASCII
// The first line with ";" is used by ../testdata/integration/moxacmepebble.sh and
// ../testdata/integration/moxmail2.sh for selecting DNS records
records := []string{
"; Time To Live of 5 minutes, may be recognized if importing as a zone file.",
"; Once your setup is working, you may want to increase the TTL.",
"$TTL 300",
"",
}
if public, ok := mox.Conf.Static.Listeners["public"]; ok && public.TLS != nil && (len(public.TLS.HostPrivateRSA2048Keys) > 0 || len(public.TLS.HostPrivateECDSAP256Keys) > 0) {
records = append(records,
`; DANE: These records indicate that a remote mail server trying to deliver email`,
`; with SMTP (TCP port 25) must verify the TLS certificate with DANE-EE (3), based`,
`; on the certificate public key ("SPKI", 1) that is SHA2-256-hashed (1) to the`,
`; hexadecimal hash. DANE-EE verification means only the certificate or public`,
`; key is verified, not whether the certificate is signed by a (centralized)`,
`; certificate authority (CA), is expired, or matches the host name.`,
`;`,
`; NOTE: Create the records below only once: They are for the machine, and apply`,
`; to all hosted domains.`,
)
if !hasDNSSEC {
records = append(records,
";",
"; WARNING: Domain does not appear to be DNSSEC-signed. To enable DANE, first",
"; enable DNSSEC on your domain, then add the TLSA records. Records below have been",
"; commented out.",
)
}
addTLSA := func(privKey crypto.Signer) error {
spkiBuf, err := x509.MarshalPKIXPublicKey(privKey.Public())
if err != nil {
return fmt.Errorf("marshal SubjectPublicKeyInfo for DANE record: %v", err)
}
sum := sha256.Sum256(spkiBuf)
tlsaRecord := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: sum[:],
}
var s string
if hasDNSSEC {
s = fmt.Sprintf("_25._tcp.%-*s TLSA %s", 20+len(d)-len("_25._tcp."), h+".", tlsaRecord.Record())
} else {
s = fmt.Sprintf(";; _25._tcp.%-*s TLSA %s", 20+len(d)-len(";; _25._tcp."), h+".", tlsaRecord.Record())
}
records = append(records, s)
return nil
}
for _, privKey := range public.TLS.HostPrivateECDSAP256Keys {
if err := addTLSA(privKey); err != nil {
return nil, err
}
}
for _, privKey := range public.TLS.HostPrivateRSA2048Keys {
if err := addTLSA(privKey); err != nil {
return nil, err
}
}
records = append(records, "")
}
if d != h {
records = append(records,
"; For the machine, only needs to be created once, for the first domain added:",
"; ",
"; SPF-allow host for itself, resulting in relaxed DMARC pass for (postmaster)",
"; messages (DSNs) sent from host:",
fmt.Sprintf(`%-*s TXT "v=spf1 a -all"`, 20+len(d), h+"."), // ../rfc/7208:2263 ../rfc/7208:2287
"",
)
}
if d != h && mox.Conf.Static.HostTLSRPT.ParsedLocalpart != "" {
uri := url.URL{
Scheme: "mailto",
Opaque: smtp.NewAddress(mox.Conf.Static.HostTLSRPT.ParsedLocalpart, mox.Conf.Static.HostnameDomain).Pack(false),
}
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
records = append(records,
"; For the machine, only needs to be created once, for the first domain added:",
"; ",
"; Request reporting about success/failures of TLS connections to (MX) host, for DANE.",
fmt.Sprintf(`_smtp._tls.%-*s TXT "%s"`, 20+len(d)-len("_smtp._tls."), h+".", tlsrptr.String()),
"",
)
}
records = append(records,
"; Deliver email for the domain to this host.",
fmt.Sprintf("%s. MX 10 %s.", d, h),
"",
"; Outgoing messages will be signed with the first two DKIM keys. The other two",
"; configured for backup, switching to them is just a config change.",
)
var selectors []string
for name := range domConf.DKIM.Selectors {
selectors = append(selectors, name)
}
slices.Sort(selectors)
for _, name := range selectors {
sel := domConf.DKIM.Selectors[name]
dkimr := dkim.Record{
Version: "DKIM1",
Hashes: []string{"sha256"},
PublicKey: sel.Key.Public(),
}
if _, ok := sel.Key.(ed25519.PrivateKey); ok {
dkimr.Key = "ed25519"
} else if _, ok := sel.Key.(*rsa.PrivateKey); !ok {
return nil, fmt.Errorf("unrecognized private key for DKIM selector %q: %T", name, sel.Key)
}
txt, err := dkimr.Record()
if err != nil {
return nil, fmt.Errorf("making DKIM DNS TXT record: %v", err)
}
if len(txt) > 100 {
records = append(records,
"; NOTE: The following is a single long record split over several lines for use",
"; in zone files. When adding through a DNS operator web interface, combine the",
"; strings into a single string, without ().",
)
}
s := fmt.Sprintf("%s._domainkey.%s. TXT %s", name, d, mox.TXTStrings(txt))
records = append(records, s)
}
dmarcr := dmarc.DefaultRecord
dmarcr.Policy = "reject"
if domConf.DMARC != nil {
uri := url.URL{
Scheme: "mailto",
Opaque: smtp.NewAddress(domConf.DMARC.ParsedLocalpart, domConf.DMARC.DNSDomain).Pack(false),
}
dmarcr.AggregateReportAddresses = []dmarc.URI{
{Address: uri.String(), MaxSize: 10, Unit: "m"},
}
}
dspfr := spf.Record{Version: "spf1"}
for _, ip := range mox.DomainSPFIPs() {
mech := "ip4"
if ip.To4() == nil {
mech = "ip6"
}
dspfr.Directives = append(dspfr.Directives, spf.Directive{Mechanism: mech, IP: ip})
}
dspfr.Directives = append(dspfr.Directives,
spf.Directive{Mechanism: "mx"},
spf.Directive{Qualifier: "~", Mechanism: "all"},
)
dspftxt, err := dspfr.Record()
if err != nil {
return nil, fmt.Errorf("making domain spf record: %v", err)
}
records = append(records,
"",
"; Specify the MX host is allowed to send for our domain and for itself (for DSNs).",
"; ~all means softfail for anything else, which is done instead of -all to prevent older",
"; mail servers from rejecting the message because they never get to looking for a dkim/dmarc pass.",
fmt.Sprintf(`%s. TXT "%s"`, d, dspftxt),
"",
"; Emails that fail the DMARC check (without aligned DKIM and without aligned SPF)",
"; should be rejected, and request reports. If you email through mailing lists that",
"; strip DKIM-Signature headers and don't rewrite the From header, you may want to",
"; set the policy to p=none.",
fmt.Sprintf(`_dmarc.%s. TXT "%s"`, d, dmarcr.String()),
"",
)
if sts := domConf.MTASTS; sts != nil {
records = append(records,
"; Remote servers can use MTA-STS to verify our TLS certificate with the",
"; WebPKI pool of CA's (certificate authorities) when delivering over SMTP with",
"; STARTTLS.",
fmt.Sprintf(`mta-sts.%s. CNAME %s.`, d, h),
fmt.Sprintf(`_mta-sts.%s. TXT "v=STSv1; id=%s"`, d, sts.PolicyID),
"",
)
} else {
records = append(records,
"; Note: No MTA-STS to indicate TLS should be used. Either because disabled for the",
"; domain or because mox.conf does not have a listener with MTA-STS configured.",
"",
)
}
if domConf.TLSRPT != nil {
uri := url.URL{
Scheme: "mailto",
Opaque: smtp.NewAddress(domConf.TLSRPT.ParsedLocalpart, domConf.TLSRPT.DNSDomain).Pack(false),
}
tlsrptr := tlsrpt.Record{Version: "TLSRPTv1", RUAs: [][]tlsrpt.RUA{{tlsrpt.RUA(uri.String())}}}
records = append(records,
"; Request reporting about TLS failures.",
fmt.Sprintf(`_smtp._tls.%s. TXT "%s"`, d, tlsrptr.String()),
"",
)
}
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
records = append(records,
"; Client settings will reference a subdomain of the hosted domain, making it",
"; easier to migrate to a different server in the future by not requiring settings",
"; in all clients to be updated.",
fmt.Sprintf(`%-*s CNAME %s.`, 20+len(d), domConf.ClientSettingsDNSDomain.ASCII+".", h),
"",
)
}
records = append(records,
"; Autoconfig is used by Thunderbird. Autodiscover is (in theory) used by Microsoft.",
fmt.Sprintf(`autoconfig.%s. CNAME %s.`, d, h),
fmt.Sprintf(`_autodiscover._tcp.%s. SRV 0 1 443 %s.`, d, h),
"",
// ../rfc/6186:133 ../rfc/8314:692
"; For secure IMAP and submission autoconfig, point to mail host.",
fmt.Sprintf(`_imaps._tcp.%s. SRV 0 1 993 %s.`, d, h),
fmt.Sprintf(`_submissions._tcp.%s. SRV 0 1 465 %s.`, d, h),
"",
// ../rfc/6186:242
"; Next records specify POP3 and non-TLS ports are not to be used.",
"; These are optional and safe to leave out (e.g. if you have to click a lot in a",
"; DNS admin web interface).",
fmt.Sprintf(`_imap._tcp.%s. SRV 0 0 0 .`, d),
fmt.Sprintf(`_submission._tcp.%s. SRV 0 0 0 .`, d),
fmt.Sprintf(`_pop3._tcp.%s. SRV 0 0 0 .`, d),
fmt.Sprintf(`_pop3s._tcp.%s. SRV 0 0 0 .`, d),
)
if certIssuerDomainName != "" {
// ../rfc/8659:18 for CAA records.
records = append(records,
"",
"; Optional:",
"; You could mark Let's Encrypt as the only Certificate Authority allowed to",
"; sign TLS certificates for your domain.",
fmt.Sprintf(`%s. CAA 0 issue "%s"`, d, certIssuerDomainName),
)
if acmeAccountURI != "" {
// ../rfc/8657:99 for accounturi.
// ../rfc/8657:147 for validationmethods.
records = append(records,
";",
"; Optionally limit certificates for this domain to the account ID and methods used by mox.",
fmt.Sprintf(`;; %s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
";",
"; Or alternatively only limit for email-specific subdomains, so you can use",
"; other accounts/methods for other subdomains.",
fmt.Sprintf(`;; autoconfig.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
fmt.Sprintf(`;; mta-sts.%s. CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, d, certIssuerDomainName, acmeAccountURI),
)
if domConf.ClientSettingsDomain != "" && domConf.ClientSettingsDNSDomain != mox.Conf.Static.HostnameDomain {
records = append(records,
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), domConf.ClientSettingsDNSDomain.ASCII, certIssuerDomainName, acmeAccountURI),
)
}
if strings.HasSuffix(h, "."+d) {
records = append(records,
";",
"; And the mail hostname.",
fmt.Sprintf(`;; %-*s CAA 0 issue "%s; accounturi=%s; validationmethods=tls-alpn-01,http-01"`, 20-3+len(d), h+".", certIssuerDomainName, acmeAccountURI),
)
}
} else {
// The string "will be suggested" is used by
// ../testdata/integration/moxacmepebble.sh and ../testdata/integration/moxmail2.sh
// as end of DNS records.
records = append(records,
";",
"; Note: After starting up, once an ACME account has been created, CAA records",
"; that restrict issuance to the account will be suggested.",
)
}
}
return records, nil
}
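The TLSA value in the DANE records above is simply the SHA2-256 hash of the certificate's SubjectPublicKeyInfo (SPKI). A minimal standalone sketch of that computation, not part of mox, using a freshly generated ECDSA P-256 key and a made-up MX hostname:

// tlsasketch: derive the DANE-EE(3) SPKI(1) SHA2-256(1) association data for a
// TLSA record, mirroring the addTLSA logic in DomainRecords above.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatalf("generating key: %v", err)
	}
	spki, err := x509.MarshalPKIXPublicKey(key.Public())
	if err != nil {
		log.Fatalf("marshal SubjectPublicKeyInfo: %v", err)
	}
	sum := sha256.Sum256(spki)
	// Usage 3 (DANE-EE), selector 1 (SPKI), matching type 1 (SHA2-256).
	fmt.Printf("_25._tcp.mail.example.org. TLSA 3 1 1 %s\n", hex.EncodeToString(sum[:]))
}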

View File

@ -1,38 +0,0 @@
#!/bin/sh
set -e
prevversion=$(go list -mod=readonly -m -f '{{ .Version }}' github.com/mjl-/mox@latest)
if ! test -d tmp/mox-$prevversion; then
mkdir -p tmp/mox-$prevversion
git archive --format=tar $prevversion | tar -C tmp/mox-$prevversion -xf -
fi
(rm -r tmp/apidiff || exit 0)
mkdir -p tmp/apidiff/$prevversion tmp/apidiff/next
(rm apidiff/next.txt.new 2>/dev/null || exit 0)
touch apidiff/next.txt.new
for p in $(cat apidiff/packages.txt); do
if ! test -d tmp/mox-$prevversion/$p; then
continue
fi
(cd tmp/mox-$prevversion && apidiff -w ../apidiff/$prevversion/$p.api ./$p)
apidiff -w tmp/apidiff/next/$p.api ./$p
apidiff -incompatible tmp/apidiff/$prevversion/$p.api tmp/apidiff/next/$p.api >$p.diff
if test -s $p.diff; then
(
echo '#' $p
cat $p.diff
echo
) >>apidiff/next.txt.new
fi
rm $p.diff
done
if test -s apidiff/next.txt.new; then
(
echo "Below are the incompatible changes between $prevversion and next, per package."
echo
cat apidiff/next.txt.new
) >apidiff/next.txt
rm apidiff/next.txt.new
else
mv apidiff/next.txt.new apidiff/next.txt
fi

View File

@ -1,10 +0,0 @@
This directory lists incompatible changes between released versions for packages
intended for reuse by third party projects, as listed in packages.txt. These
files are generated using golang.org/x/exp/cmd/apidiff (see
https://pkg.go.dev/golang.org/x/exp/apidiff) and ../apidiff.sh.
There is no guarantee that there will be no breaking changes. With Go's
dependency versioning approach (minimal version selection), Go code will never
unexpectedly stop compiling. Incompatibilities only show up when a dependency is
explicitly updated. Making the required changes is typically fairly
straightforward.
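To make the minimal version selection point concrete, here is a hypothetical go.mod of a third-party project reusing these packages (the module path and pinned version are made up for illustration). Its build keeps using the pinned mox version until the requirement is raised explicitly, e.g. with "go get github.com/mjl-/mox@latest"; only then do the incompatibilities listed in these files surface as compile errors.

// Hypothetical go.mod of a project importing mox packages such as smtpclient.
module example.com/mailtool

go 1.21

require github.com/mjl-/mox v0.0.9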

View File

@ -1,5 +0,0 @@
Below are the incompatible changes between v0.0.15 and next, per package.
# smtpclient
- GatherDestinations: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []HostPref, bool, error)

View File

@ -1,20 +0,0 @@
dane
dmarc
dmarcrpt
dns
dnsbl
iprev
message
mtasts
publicsuffix
ratelimit
sasl
scram
smtp
smtpclient
spf
subjectpass
tlsrpt
updates
webapi
webhook

View File

@ -1,79 +0,0 @@
Below are the incompatible changes between v0.0.9 and v0.0.10, per package.
# dane
- Dial: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error)
- TLSClientConfig: changed from func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config to func(*log/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config
- Verify: changed from func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error) to func(*log/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error)
# dmarc
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error)
- LookupExternalReportsAccepted: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error)
- Verify: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result)
# dmarcrpt
- ParseMessageReport: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*Feedback, error) to func(*log/slog.Logger, io.ReaderAt) (*Feedback, error)
# dns
- StrictResolver.Log: changed from *golang.org/x/exp/slog.Logger to *log/slog.Logger
# dnsbl
- CheckHealth: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error)
# iprev
# message
- (*Part).ParseNextPart: changed from func(*golang.org/x/exp/slog.Logger) (*Part, error) to func(*log/slog.Logger) (*Part, error)
- (*Part).Walk: changed from func(*golang.org/x/exp/slog.Logger, *Part) error to func(*log/slog.Logger, *Part) error
- EnsurePart: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt, int64) (Part, error) to func(*log/slog.Logger, bool, io.ReaderAt, int64) (Part, error)
- From: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error) to func(*log/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
- Parse: changed from func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (Part, error) to func(*log/slog.Logger, bool, io.ReaderAt) (Part, error)
# mtasts
- FetchPolicy: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error)
- Get: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error)
- HTTPClientObserve: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, string, int, error, time.Time) to func(context.Context, *log/slog.Logger, string, string, int, error, time.Time)
- LookupRecord: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
# publicsuffix
- List.Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
- ParseList: changed from func(*golang.org/x/exp/slog.Logger, io.Reader) (List, error) to func(*log/slog.Logger, io.Reader) (List, error)
# ratelimit
# sasl
# scram
# smtp
- SePol7ARCFail: removed
- SePol7MissingReqTLS: removed
# smtpclient
- Dial: changed from func(context.Context, *golang.org/x/exp/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error) to func(context.Context, *log/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error)
- Error: old is comparable, new is not
- GatherDestinations: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error)
- GatherIPs: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
- GatherTLSA: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error)
- New: changed from func(context.Context, *golang.org/x/exp/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error) to func(context.Context, *log/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error)
# spf
- Evaluate: changed from func(context.Context, *golang.org/x/exp/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error) to func(context.Context, *log/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error)
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error)
- Verify: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error)
# subjectpass
- Generate: changed from func(*golang.org/x/exp/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string to func(*log/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string
- Verify: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt, []byte, time.Duration) error to func(*log/slog.Logger, io.ReaderAt, []byte, time.Duration) error
# tlsrpt
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
- ParseMessage: changed from func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*ReportJSON, error) to func(*log/slog.Logger, io.ReaderAt) (*ReportJSON, error)
# updates
- Check: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error)
- FetchChangelog: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, Version, []byte) (*Changelog, error) to func(context.Context, *log/slog.Logger, string, Version, []byte) (*Changelog, error)
- HTTPClientObserve: changed from func(context.Context, *golang.org/x/exp/slog.Logger, string, string, int, error, time.Time) to func(context.Context, *log/slog.Logger, string, string, int, error, time.Time)
- Lookup: changed from func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error)

View File

@ -1,45 +0,0 @@
Below are the incompatible changes between v0.0.10 and v0.0.11, per package.
# dane
# dmarc
- DMARCPolicy: removed
# dmarcrpt
# dns
# dnsbl
# iprev
# message
- (*Composer).TextPart: changed from func(string) ([]byte, string, string) to func(string, string) ([]byte, string, string)
- From: changed from func(*log/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error) to func(*log/slog.Logger, bool, io.ReaderAt, *Part) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
- NewComposer: changed from func(io.Writer, int64) *Composer to func(io.Writer, int64, bool) *Composer
# mtasts
- STSMX: removed
# publicsuffix
# ratelimit
# sasl
# scram
# smtp
- SeMsg6ConversoinUnsupported3: removed
# smtpclient
- GatherIPs: changed from func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *log/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
# spf
# subjectpass
# tlsrpt
# updates

View File

@ -1,43 +0,0 @@
Below are the incompatible changes between v0.0.11 and next, per package.
# dane
# dmarc
# dmarcrpt
# dns
# dnsbl
# iprev
# message
- (*HeaderWriter).AddWrap: changed from func([]byte) to func([]byte, bool)
# mtasts
# publicsuffix
# ratelimit
# sasl
# scram
# smtp
# smtpclient
# spf
# subjectpass
# tlsrpt
# updates
# webapi
# webhook

View File

@ -1,5 +0,0 @@
Below are the incompatible changes between v0.0.13 and next, per package.
# webhook
- PartStructure: removed

View File

@ -1,7 +0,0 @@
Below are the incompatible changes between v0.0.14 and next, per package.
# message
- Part.ContentDescription: changed from string to *string
- Part.ContentID: changed from string to *string
- Part.ContentTransferEncoding: changed from string to *string

View File

@ -1,83 +0,0 @@
Below are the incompatible changes between v0.0.8 and v0.0.9, per package.
# dane
- Dial: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage) (net.Conn, github.com/mjl-/adns.TLSA, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, string, string, []github.com/mjl-/adns.TLSAUsage, *crypto/x509.CertPool) (net.Conn, github.com/mjl-/adns.TLSA, error)
- TLSClientConfig: changed from func(*github.com/mjl-/mox/mlog.Log, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA) crypto/tls.Config to func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *github.com/mjl-/adns.TLSA, *crypto/x509.CertPool) crypto/tls.Config
- Verify: changed from func(*github.com/mjl-/mox/mlog.Log, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain) (bool, github.com/mjl-/adns.TLSA, error) to func(*golang.org/x/exp/slog.Logger, []github.com/mjl-/adns.TLSA, crypto/tls.ConnectionState, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.Domain, *crypto/x509.CertPool) (bool, github.com/mjl-/adns.TLSA, error)
# dmarc
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, github.com/mjl-/mox/dns.Domain, *Record, string, bool, error)
- LookupExternalReportsAccepted: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain) (bool, Status, []*Record, []string, bool, error)
- Verify: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dkim.Result, github.com/mjl-/mox/spf.Status, *github.com/mjl-/mox/dns.Domain, bool) (bool, Result)
# dmarcrpt
- ParseMessageReport: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt) (*Feedback, error) to func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*Feedback, error)
# dns
# dnsbl
- CheckHealth: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) error
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, net.IP) (Status, string, error)
# iprev
# message
- (*Part).ParseNextPart: changed from func(*github.com/mjl-/mox/mlog.Log) (*Part, error) to func(*golang.org/x/exp/slog.Logger) (*Part, error)
- (*Part).Walk: changed from func(*github.com/mjl-/mox/mlog.Log, *Part) error to func(*golang.org/x/exp/slog.Logger, *Part) error
- EnsurePart: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt, int64) (Part, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt, int64) (Part, error)
- From: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, net/textproto.MIMEHeader, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (github.com/mjl-/mox/smtp.Address, *Envelope, net/textproto.MIMEHeader, error)
- Parse: changed from func(*github.com/mjl-/mox/mlog.Log, bool, io.ReaderAt) (Part, error) to func(*golang.org/x/exp/slog.Logger, bool, io.ReaderAt) (Part, error)
- TLSReceivedComment: removed
# mtasts
- FetchPolicy: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) (*Policy, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) (*Policy, string, error)
- Get: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, *Policy, string, error)
- LookupRecord: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
# publicsuffix
- List.Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Domain) github.com/mjl-/mox/dns.Domain
- ParseList: changed from func(io.Reader) (List, error) to func(*golang.org/x/exp/slog.Logger, io.Reader) (List, error)
# ratelimit
# sasl
- NewClientSCRAMSHA1: changed from func(string, string) Client to func(string, string, bool) Client
- NewClientSCRAMSHA256: changed from func(string, string) Client to func(string, string, bool) Client
# scram
- HMAC: removed
- NewClient: changed from func(func() hash.Hash, string, string) *Client to func(func() hash.Hash, string, string, bool, *crypto/tls.ConnectionState) *Client
- NewServer: changed from func(func() hash.Hash, []byte) (*Server, error) to func(func() hash.Hash, []byte, *crypto/tls.ConnectionState, bool) (*Server, error)
# smtp
# smtpclient
- (*Client).TLSEnabled: removed
- Dial: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP) (net.Conn, net.IP, error) to func(context.Context, *golang.org/x/exp/slog.Logger, Dialer, github.com/mjl-/mox/dns.IPDomain, []net.IP, int, map[string][]net.IP, []net.IP) (net.Conn, net.IP, error)
- GatherDestinations: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain) (bool, bool, bool, github.com/mjl-/mox/dns.Domain, []github.com/mjl-/mox/dns.IPDomain, bool, error)
- GatherIPs: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.IPDomain, map[string][]net.IP) (bool, bool, github.com/mjl-/mox/dns.Domain, []net.IP, bool, error)
- GatherTLSA: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, bool, github.com/mjl-/mox/dns.Domain) (bool, []github.com/mjl-/adns.TLSA, github.com/mjl-/mox/dns.Domain, error)
- New: changed from func(context.Context, *github.com/mjl-/mox/mlog.Log, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error) to func(context.Context, *golang.org/x/exp/slog.Logger, net.Conn, TLSMode, bool, github.com/mjl-/mox/dns.Domain, github.com/mjl-/mox/dns.Domain, Opts) (*Client, error)
- Opts.Auth: changed from []github.com/mjl-/mox/sasl.Client to func([]string, *crypto/tls.ConnectionState) (github.com/mjl-/mox/sasl.Client, error)
# spf
- Evaluate: changed from func(context.Context, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, *Record, github.com/mjl-/mox/dns.Resolver, Args) (Status, string, string, bool, error)
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Status, string, *Record, bool, error)
- Verify: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, Args) (Received, github.com/mjl-/mox/dns.Domain, string, bool, error)
# subjectpass
- Generate: changed from func(github.com/mjl-/mox/smtp.Address, []byte, time.Time) string to func(*golang.org/x/exp/slog.Logger, github.com/mjl-/mox/smtp.Address, []byte, time.Time) string
- Verify: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt, []byte, time.Duration) error to func(*golang.org/x/exp/slog.Logger, io.ReaderAt, []byte, time.Duration) error
# tlsrpt
- (*TLSRPTDateRange).UnmarshalJSON: removed
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (*Record, string, error)
- Parse: changed from func(io.Reader) (*Report, error) to func(io.Reader) (*ReportJSON, error)
- ParseMessage: changed from func(*github.com/mjl-/mox/mlog.Log, io.ReaderAt) (*Report, error) to func(*golang.org/x/exp/slog.Logger, io.ReaderAt) (*ReportJSON, error)
# updates
- Check: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain, Version, string, []byte) (Version, *Record, *Changelog, error)
- FetchChangelog: changed from func(context.Context, string, Version, []byte) (*Changelog, error) to func(context.Context, *golang.org/x/exp/slog.Logger, string, Version, []byte) (*Changelog, error)
- Lookup: changed from func(context.Context, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error) to func(context.Context, *golang.org/x/exp/slog.Logger, github.com/mjl-/mox/dns.Resolver, github.com/mjl-/mox/dns.Domain) (Version, *Record, error)

View File

@ -20,7 +20,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
@ -29,37 +28,19 @@ import (
"sync" "sync"
"time" "time"
"golang.org/x/crypto/acme"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"golang.org/x/crypto/acme"
"github.com/mjl-/autocert" "golang.org/x/crypto/acme/autocert"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/moxvar" "github.com/mjl-/mox/moxvar"
) )
var xlog = mlog.New("autotls")
var ( var (
metricMissingServerName = promauto.NewCounter(
prometheus.CounterOpts{
Name: "mox_autotls_missing_servername_total",
Help: "Number of failed TLS connection attempts with missing SNI where no fallback hostname was configured.",
},
)
metricUnknownServerName = promauto.NewCounter(
prometheus.CounterOpts{
Name: "mox_autotls_unknown_servername_total",
Help: "Number of failed TLS connection attempts with an unrecognized SNI name where no fallback hostname was configured.",
},
)
metricCertRequestErrors = promauto.NewCounter(
prometheus.CounterOpts{
Name: "mox_autotls_cert_request_errors_total",
Help: "Number of errors trying to retrieve a certificate for a hostname, possibly ACME verification errors.",
},
)
metricCertput = promauto.NewCounter( metricCertput = promauto.NewCounter(
prometheus.CounterOpts{ prometheus.CounterOpts{
Name: "mox_autotls_certput_total", Name: "mox_autotls_certput_total",
@ -72,6 +53,7 @@ var (
// certificates for allowlisted hosts. // certificates for allowlisted hosts.
type Manager struct { type Manager struct {
ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed. ACMETLSConfig *tls.Config // For serving HTTPS on port 443, which is required for certificate requests to succeed.
TLSConfig *tls.Config // For all TLS servers not used for validating ACME requests. Like SMTP and IMAP (including with STARTTLS) and HTTPS on ports other than 443.
Manager *autocert.Manager Manager *autocert.Manager
shutdown <-chan struct{} shutdown <-chan struct{}
@ -82,19 +64,10 @@ type Manager struct {
// Load returns an initialized autotls manager for "name" (used for the ACME key // Load returns an initialized autotls manager for "name" (used for the ACME key
// file and requested certs and their keys). All files are stored within acmeDir. // file and requested certs and their keys). All files are stored within acmeDir.
//
// contactEmail must be a valid email address to which notifications about ACME can // contactEmail must be a valid email address to which notifications about ACME can
// be sent. directoryURL is the ACME starting point. // be sent. directoryURL is the ACME starting point. When shutdown is closed, no
// // new TLS connections can be created.
// eabKeyID and eabKey are for external account binding when making a new account, func Load(name, acmeDir, contactEmail, directoryURL string, shutdown <-chan struct{}) (*Manager, error) {
// which some ACME providers require.
//
// getPrivateKey is called to get the private key for the host and key type. It
// can be used to deliver a specific (e.g. always the same) private key for a
// host, or a newly generated key.
//
// When shutdown is closed, no new TLS connections can be created.
func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKeyID string, eabKey []byte, getPrivateKey func(host string, keyType autocert.KeyType) (crypto.Signer, error), shutdown <-chan struct{}) (*Manager, error) {
if directoryURL == "" { if directoryURL == "" {
return nil, fmt.Errorf("empty ACME directory URL") return nil, fmt.Errorf("empty ACME directory URL")
} }
@ -103,14 +76,11 @@ func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKey
} }
// Load identity key if it exists. Otherwise, create a new key. // Load identity key if it exists. Otherwise, create a new key.
p := filepath.Join(acmeDir, name+".key") p := filepath.Join(acmeDir + "/" + name + ".key")
var key crypto.Signer var key crypto.Signer
f, err := os.Open(p) f, err := os.Open(p)
if f != nil { if f != nil {
defer func() { defer f.Close()
err := f.Close()
log.Check(err, "closing identify key file")
}()
} }
if err != nil && os.IsNotExist(err) { if err != nil && os.IsNotExist(err) {
key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) key, err = ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
@ -158,7 +128,7 @@ func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKey
} }
m := &autocert.Manager{ m := &autocert.Manager{
Cache: dirCache(filepath.Join(acmeDir, "keycerts", name)), Cache: dirCache(acmeDir + "/keycerts/" + name),
Prompt: autocert.AcceptTOS, Prompt: autocert.AcceptTOS,
Email: contactEmail, Email: contactEmail,
Client: &acme.Client{ Client: &acme.Client{
@ -166,163 +136,57 @@ func Load(log mlog.Log, name, acmeDir, contactEmail, directoryURL string, eabKey
Key: key, Key: key,
UserAgent: "mox/" + moxvar.Version, UserAgent: "mox/" + moxvar.Version,
}, },
GetPrivateKey: getPrivateKey,
// HostPolicy set below. // HostPolicy set below.
} }
// If external account binding key is provided, use it for registering a new account.
// todo: ideally the key and its id are provided temporarily by the admin when registering a new account. but we don't do that interactive setup yet. in the future, an interactive setup/quickstart would ask for the key once to register a new acme account. loggingGetCertificate := func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
if eabKeyID != "" { log := xlog.WithContext(hello.Context())
m.ExternalAccountBinding = &acme.ExternalAccountBinding{
KID: eabKeyID, // Handle missing SNI to prevent logging an error below.
Key: eabKey, // At startup, during config initialization, we already adjust the tls config to
// inject the listener hostname if there isn't one in the TLS client hello. This is
// common for SMTP STARTTLS connections, which often do not care about the
// validation of the certificate.
if hello.ServerName == "" {
log.Debug("tls request without sni servername, rejecting", mlog.Field("localaddr", hello.Conn.LocalAddr()), mlog.Field("supportedprotos", hello.SupportedProtos))
return nil, fmt.Errorf("sni server name required")
} }
cert, err := m.GetCertificate(hello)
if err != nil {
if errors.Is(err, errHostNotAllowed) {
log.Debugx("requesting certificate", err, mlog.Field("host", hello.ServerName))
} else {
log.Errorx("requesting certificate", err, mlog.Field("host", hello.ServerName))
}
}
return cert, err
}
acmeTLSConfig := *m.TLSConfig()
acmeTLSConfig.GetCertificate = loggingGetCertificate
tlsConfig := tls.Config{
GetCertificate: loggingGetCertificate,
} }
a := &Manager{ a := &Manager{
Manager: m, ACMETLSConfig: &acmeTLSConfig,
shutdown: shutdown, TLSConfig: &tlsConfig,
hosts: map[dns.Domain]struct{}{}, Manager: m,
shutdown: shutdown,
hosts: map[dns.Domain]struct{}{},
} }
m.HostPolicy = a.HostPolicy m.HostPolicy = a.HostPolicy
acmeTLSConfig := *m.TLSConfig()
acmeTLSConfig.GetCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return a.loggingGetCertificate(hello, dns.Domain{}, false, false)
}
a.ACMETLSConfig = &acmeTLSConfig
return a, nil return a, nil
} }
// loggingGetCertificate is a helper to implement crypto/tls.Config.GetCertificate,
// optionally falling back to a certificate for fallbackHostname in case SNI is
// absent or for an unknown hostname.
func (m *Manager) loggingGetCertificate(hello *tls.ClientHelloInfo, fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) (*tls.Certificate, error) {
log := mlog.New("autotls", nil).WithContext(hello.Context()).With(
slog.Any("localaddr", hello.Conn.LocalAddr()),
slog.Any("supportedprotos", hello.SupportedProtos),
slog.String("servername", hello.ServerName),
)
// If we can't find a certificate (depending on fallback parameters), we return a
// nil certificate and nil error, which crypto/tls turns into a TLS alert
// "unrecognized name", which can be interpreted by clients as a hint that they are
// using the wrong hostname, or a certificate is missing. ../rfc/9325:578
// IP addresses for ServerName are not allowed, but happen in practice. If we
// should be lenient (fallbackUnknownSNI), we switch to the fallback hostname,
// otherwise we return an error. We don't want to pass IP addresses to
// GetCertificate because it will return an error for IPv6 addresses.
// ../rfc/6066:367 ../rfc/4366:535
if net.ParseIP(hello.ServerName) != nil {
if fallbackUnknownSNI {
hello.ServerName = fallbackHostname.ASCII
log = log.With(slog.String("servername", hello.ServerName))
} else {
log.Debug("tls request with ip for server name, rejecting")
return nil, fmt.Errorf("invalid ip address for sni server name")
}
}
if hello.ServerName == "" && fallbackNoSNI {
hello.ServerName = fallbackHostname.ASCII
log = log.With(slog.String("servername", hello.ServerName))
}
// Handle missing SNI to prevent logging an error below.
if hello.ServerName == "" {
metricMissingServerName.Inc()
log.Debug("tls request without sni server name, rejecting")
return nil, nil
}
cert, err := m.Manager.GetCertificate(hello)
if err != nil && errors.Is(err, errHostNotAllowed) {
if !fallbackUnknownSNI {
metricUnknownServerName.Inc()
log.Debugx("requesting certificate", err)
return nil, nil
}
// Some legitimate email deliveries over SMTP use an unknown SNI, e.g. a bare
// domain instead of the MX hostname. We "should" return an error, but that would
// break email delivery, so we use the fallback name if it is configured.
// ../rfc/9325:589
log = log.With(slog.String("servername", hello.ServerName))
log.Debug("certificate for unknown hostname, using fallback hostname")
hello.ServerName = fallbackHostname.ASCII
cert, err = m.Manager.GetCertificate(hello)
if err != nil {
metricCertRequestErrors.Inc()
log.Errorx("requesting certificate for fallback hostname", err)
} else {
log.Debug("using certificate for fallback hostname")
}
return cert, err
} else if err != nil {
metricCertRequestErrors.Inc()
log.Errorx("requesting certificate", err)
}
return cert, err
}
// TLSConfig returns a TLS server config that optionally returns a certificate for
// fallbackHostname if no SNI was done, or for an unknown hostname.
//
// If fallbackNoSNI is set, TLS connections without SNI will use a certificate for
// fallbackHostname. Otherwise, connections without SNI will fail with a message
// that no TLS certificate is available.
//
// If fallbackUnknownSNI is set, TLS connections with an SNI hostname that is not
// allowlisted will instead use a certificate for fallbackHostname. Otherwise, such
// TLS connections will fail.
func (m *Manager) TLSConfig(fallbackHostname dns.Domain, fallbackNoSNI, fallbackUnknownSNI bool) *tls.Config {
return &tls.Config{
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
return m.loggingGetCertificate(hello, fallbackHostname, fallbackNoSNI, fallbackUnknownSNI)
},
}
}
// CertAvailable checks whether a non-expired ECDSA certificate is available in the
// cache for host. No other checks than expiration are done.
func (m *Manager) CertAvailable(ctx context.Context, log mlog.Log, host dns.Domain) (bool, error) {
ck := host.ASCII // Would be "+rsa" for rsa keys.
data, err := m.Manager.Cache.Get(ctx, ck)
if err != nil && errors.Is(err, autocert.ErrCacheMiss) {
return false, nil
} else if err != nil {
return false, fmt.Errorf("attempt to get certificate from cache: %v", err)
}
// The cached keycert is of the form: private key, leaf certificate, intermediate certificates...
privb, rem := pem.Decode(data)
if privb == nil {
return false, fmt.Errorf("missing private key in cached keycert file")
}
pubb, _ := pem.Decode(rem)
if pubb == nil {
return false, fmt.Errorf("missing certificate in cached keycert file")
} else if pubb.Type != "CERTIFICATE" {
return false, fmt.Errorf("second pem block is %q, expected CERTIFICATE", pubb.Type)
}
cert, err := x509.ParseCertificate(pubb.Bytes)
if err != nil {
return false, fmt.Errorf("parsing certificate from cached keycert file: %v", err)
}
// We assume the certificate has a matching hostname, and is properly CA-signed. We
// only check the expiration time.
if time.Until(cert.NotBefore) > 0 || time.Since(cert.NotAfter) > 0 {
return false, nil
}
return true, nil
}
// SetAllowedHostnames sets a new list of allowed hostnames for automatic TLS. // SetAllowedHostnames sets a new list of allowed hostnames for automatic TLS.
// After setting the host names, a goroutine is started to check that new host names // After setting the host names, a goroutine is started to check that new host names
// are fully served by publicIPs (only if non-empty and there is no unspecified // are fully served by publicIPs (only if non-empty and there is no unspecified
// address in the list). If not, log an error with a warning that ACME validation // address in the list). If not, log an error with a warning that ACME validation
// may fail. // may fail.
func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostnames map[dns.Domain]struct{}, publicIPs []string, checkHosts bool) { func (m *Manager) SetAllowedHostnames(resolver dns.Resolver, hostnames map[dns.Domain]struct{}, publicIPs []string, checkHosts bool) {
m.Lock() m.Lock()
defer m.Unlock() defer m.Unlock()
@ -335,7 +199,7 @@ func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostn
return l[i].Name() < l[j].Name() return l[i].Name() < l[j].Name()
}) })
log.Debug("autotls setting allowed hostnames", slog.Any("hostnames", l), slog.Any("publicips", publicIPs)) xlog.Debug("autotls setting allowed hostnames", mlog.Field("hostnames", l), mlog.Field("publicips", publicIPs))
var added []dns.Domain var added []dns.Domain
for h := range hostnames { for h := range hostnames {
if _, ok := m.hosts[h]; !ok { if _, ok := m.hosts[h]; !ok {
@ -359,20 +223,16 @@ func (m *Manager) SetAllowedHostnames(log mlog.Log, resolver dns.Resolver, hostn
publicIPstrs[ip] = struct{}{} publicIPstrs[ip] = struct{}{}
} }
log.Debug("checking ips of hosts configured for acme tls cert validation") xlog.Debug("checking ips of hosts configured for acme tls cert validation")
for _, h := range added { for _, h := range added {
ips, _, err := resolver.LookupIP(ctx, "ip", h.ASCII+".") ips, err := resolver.LookupIP(ctx, "ip", h.ASCII+".")
if err != nil { if err != nil {
log.Warnx("acme tls cert validation for host may fail due to dns lookup error", err, slog.Any("host", h)) xlog.Errorx("warning: acme tls cert validation for host may fail due to dns lookup error", err, mlog.Field("host", h))
continue continue
} }
for _, ip := range ips { for _, ip := range ips {
if _, ok := publicIPstrs[ip.String()]; !ok { if _, ok := publicIPstrs[ip.String()]; !ok {
log.Warn("acme tls cert validation for host is likely to fail because not all its ips are being listened on", xlog.Error("warning: acme tls cert validation for host is likely to fail because not all its ips are being listened on", mlog.Field("hostname", h), mlog.Field("listenedips", publicIPs), mlog.Field("hostips", ips), mlog.Field("missingip", ip))
slog.Any("hostname", h),
slog.Any("listenedips", publicIPs),
slog.Any("hostips", ips),
slog.Any("missingip", ip))
} }
} }
} }
@ -395,12 +255,12 @@ var errHostNotAllowed = errors.New("autotls: host not in allowlist")
// HostPolicy decides if a host is allowed for use with ACME, i.e. whether a // HostPolicy decides if a host is allowed for use with ACME, i.e. whether a
// certificate will be returned if present and/or will be requested if not yet // certificate will be returned if present and/or will be requested if not yet
// present. Only hosts added with SetAllowedHostnames are allowed. During shutdown, // present. Only hosts added with AllowHostname are allowed. During shutdown, no
// no new connections are allowed. // new connections are allowed.
func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) { func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) {
log := mlog.New("autotls", nil).WithContext(ctx) log := xlog.WithContext(ctx)
defer func() { defer func() {
log.Debugx("autotls hostpolicy result", rerr, slog.String("host", host)) log.WithContext(ctx).Debugx("autotls hostpolicy result", rerr, mlog.Field("host", host))
}() }()
// Don't request new TLS certs when we are shutting down. // Don't request new TLS certs when we are shutting down.
@ -432,46 +292,46 @@ func (m *Manager) HostPolicy(ctx context.Context, host string) (rerr error) {
type dirCache autocert.DirCache type dirCache autocert.DirCache
func (d dirCache) Delete(ctx context.Context, name string) (rerr error) { func (d dirCache) Delete(ctx context.Context, name string) (rerr error) {
log := mlog.New("autotls", nil).WithContext(ctx) log := xlog.WithContext(ctx)
defer func() { defer func() {
log.Debugx("dircache delete result", rerr, slog.String("name", name)) log.Debugx("dircache delete result", rerr, mlog.Field("name", name))
}() }()
err := autocert.DirCache(d).Delete(ctx, name) err := autocert.DirCache(d).Delete(ctx, name)
if err != nil { if err != nil {
log.Errorx("deleting cert from dir cache", err, slog.String("name", name)) log.Errorx("deleting cert from dir cache", err, mlog.Field("name", name))
} else if !strings.HasSuffix(name, "+token") { } else if !strings.HasSuffix(name, "+token") {
log.Info("autotls cert delete", slog.String("name", name)) log.Info("autotls cert delete", mlog.Field("name", name))
} }
return err return err
} }
func (d dirCache) Get(ctx context.Context, name string) (rbuf []byte, rerr error) { func (d dirCache) Get(ctx context.Context, name string) (rbuf []byte, rerr error) {
log := mlog.New("autotls", nil).WithContext(ctx) log := xlog.WithContext(ctx)
defer func() { defer func() {
log.Debugx("dircache get result", rerr, slog.String("name", name)) log.Debugx("dircache get result", rerr, mlog.Field("name", name))
}() }()
buf, err := autocert.DirCache(d).Get(ctx, name) buf, err := autocert.DirCache(d).Get(ctx, name)
if err != nil && errors.Is(err, autocert.ErrCacheMiss) { if err != nil && errors.Is(err, autocert.ErrCacheMiss) {
log.Infox("getting cert from dir cache", err, slog.String("name", name)) log.Infox("getting cert from dir cache", err, mlog.Field("name", name))
} else if err != nil { } else if err != nil {
log.Errorx("getting cert from dir cache", err, slog.String("name", name)) log.Errorx("getting cert from dir cache", err, mlog.Field("name", name))
} else if !strings.HasSuffix(name, "+token") { } else if !strings.HasSuffix(name, "+token") {
log.Debug("autotls cert get", slog.String("name", name)) log.Debug("autotls cert get", mlog.Field("name", name))
} }
return buf, err return buf, err
} }
func (d dirCache) Put(ctx context.Context, name string, data []byte) (rerr error) { func (d dirCache) Put(ctx context.Context, name string, data []byte) (rerr error) {
log := mlog.New("autotls", nil).WithContext(ctx) log := xlog.WithContext(ctx)
defer func() { defer func() {
log.Debugx("dircache put result", rerr, slog.String("name", name)) log.Debugx("dircache put result", rerr, mlog.Field("name", name))
}() }()
metricCertput.Inc() metricCertput.Inc()
err := autocert.DirCache(d).Put(ctx, name, data) err := autocert.DirCache(d).Put(ctx, name, data)
if err != nil { if err != nil {
log.Errorx("storing cert in dir cache", err, slog.String("name", name)) log.Errorx("storing cert in dir cache", err, mlog.Field("name", name))
} else if !strings.HasSuffix(name, "+token") { } else if !strings.HasSuffix(name, "+token") {
log.Info("autotls cert store", slog.String("name", name)) log.Info("autotls cert store", mlog.Field("name", name))
} }
return err return err
} }
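The SNI fallback behavior that TLSConfig and loggingGetCertificate implement above (serving a certificate for a configured fallback hostname when the client sends no SNI or an unknown name, rather than failing SMTP STARTTLS deliveries) can be sketched standalone with plain crypto/tls. This is not the mox implementation; the certificate files and hostname are assumptions for illustration:

// snifallback: a tls.Config whose GetCertificate falls back to a default
// certificate when the ClientHello carries no SNI or an unrecognized name.
package main

import (
	"crypto/tls"
	"log"
)

func fallbackTLSConfig(certs map[string]*tls.Certificate, fallback *tls.Certificate) *tls.Config {
	return &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if hello.ServerName == "" {
				// No SNI, common for SMTP STARTTLS clients that don't verify certificates.
				return fallback, nil
			}
			if cert, ok := certs[hello.ServerName]; ok {
				return cert, nil
			}
			// Unknown name, e.g. delivery to the bare domain instead of the MX hostname.
			return fallback, nil
		},
	}
}

func main() {
	// Hypothetical key/cert files; in mox these come from the ACME cache.
	cert, err := tls.LoadX509KeyPair("mail.example.org-cert.pem", "mail.example.org-key.pem")
	if err != nil {
		log.Fatalf("loading keypair: %v", err)
	}
	cfg := fallbackTLSConfig(map[string]*tls.Certificate{"mail.example.org": &cert}, &cert)
	_ = cfg // e.g. pass to tls.NewListener
}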

View File

@ -2,30 +2,22 @@ package autotls
import ( import (
"context" "context"
"crypto"
"errors" "errors"
"fmt"
"os" "os"
"reflect" "reflect"
"testing" "testing"
"github.com/mjl-/autocert" "golang.org/x/crypto/acme/autocert"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
) )
func TestAutotls(t *testing.T) { func TestAutotls(t *testing.T) {
log := mlog.New("autotls", nil)
os.RemoveAll("../testdata/autotls") os.RemoveAll("../testdata/autotls")
os.MkdirAll("../testdata/autotls", 0770) os.MkdirAll("../testdata/autotls", 0770)
shutdown := make(chan struct{}) shutdown := make(chan struct{})
m, err := Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
getPrivateKey := func(host string, keyType autocert.KeyType) (crypto.Signer, error) {
return nil, fmt.Errorf("not used")
}
m, err := Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown)
if err != nil { if err != nil {
t.Fatalf("load manager: %v", err) t.Fatalf("load manager: %v", err)
} }
@ -36,7 +28,7 @@ func TestAutotls(t *testing.T) {
if err := m.HostPolicy(context.Background(), "mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) { if err := m.HostPolicy(context.Background(), "mox.example"); err == nil || !errors.Is(err, errHostNotAllowed) {
t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err) t.Fatalf("hostpolicy, got err %v, expected errHostNotAllowed", err)
} }
m.SetAllowedHostnames(log, dns.MockResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false) m.SetAllowedHostnames(dns.StrictResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
l = m.Hostnames() l = m.Hostnames()
if !reflect.DeepEqual(l, []dns.Domain{{ASCII: "mox.example"}}) { if !reflect.DeepEqual(l, []dns.Domain{{ASCII: "mox.example"}}) {
t.Fatalf("hostnames, got %v, expected single mox.example", l) t.Fatalf("hostnames, got %v, expected single mox.example", l)
@ -82,7 +74,7 @@ func TestAutotls(t *testing.T) {
key0 := m.Manager.Client.Key key0 := m.Manager.Client.Key
m, err = Load(log, "test", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, getPrivateKey, shutdown) m, err = Load("test", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
if err != nil { if err != nil {
t.Fatalf("load manager again: %v", err) t.Fatalf("load manager again: %v", err)
} }
@ -90,12 +82,12 @@ func TestAutotls(t *testing.T) {
t.Fatalf("private key changed after reload") t.Fatalf("private key changed after reload")
} }
m.shutdown = make(chan struct{}) m.shutdown = make(chan struct{})
m.SetAllowedHostnames(log, dns.MockResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false) m.SetAllowedHostnames(dns.StrictResolver{}, map[dns.Domain]struct{}{{ASCII: "mox.example"}: {}}, nil, false)
if err := m.HostPolicy(context.Background(), "mox.example"); err != nil { if err := m.HostPolicy(context.Background(), "mox.example"); err != nil {
t.Fatalf("hostpolicy, got err %v, expected no error", err) t.Fatalf("hostpolicy, got err %v, expected no error", err)
} }
m2, err := Load(log, "test2", "../testdata/autotls", "mox@localhost", "https://localhost/", "", nil, nil, shutdown) m2, err := Load("test2", "../testdata/autotls", "mox@localhost", "https://localhost/", shutdown)
if err != nil { if err != nil {
t.Fatalf("load another manager: %v", err) t.Fatalf("load another manager: %v", err)
} }

backup.go

@ -7,18 +7,15 @@ import (
"fmt" "fmt"
"io" "io"
"io/fs" "io/fs"
"log/slog"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strconv"
"strings" "strings"
"syscall"
"time" "time"
"github.com/mjl-/bstore" "github.com/mjl-/bstore"
"github.com/mjl-/mox/dmarcdb" "github.com/mjl-/mox/dmarcdb"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxvar" "github.com/mjl-/mox/moxvar"
"github.com/mjl-/mox/mtastsdb" "github.com/mjl-/mox/mtastsdb"
@ -27,7 +24,7 @@ import (
"github.com/mjl-/mox/tlsrptdb" "github.com/mjl-/mox/tlsrptdb"
) )
func xbackupctl(ctx context.Context, xctl *ctl) { func backupctl(ctx context.Context, ctl *ctl) {
/* protocol: /* protocol:
> "backup" > "backup"
> destdir > destdir
@ -41,144 +38,61 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// "src" or "dst" are incomplete paths relative to the source or destination data // "src" or "dst" are incomplete paths relative to the source or destination data
// directories. // directories.
dstDir := xctl.xread() dstDataDir := ctl.xread()
verbose := xctl.xread() == "verbose" verbose := ctl.xread() == "verbose"
// Set when an error is encountered. At the end, we warn if set. // Set when an error is encountered. At the end, we warn if set.
var incomplete bool var incomplete bool
// We'll be writing output, and logging both to mox and the ctl stream. // We'll be writing output, and logging both to mox and the ctl stream.
xwriter := xctl.writer() writer := ctl.writer()
// Format easily readable output for the user. // Format easily readable output for the user.
formatLog := func(prefix, text string, err error, attrs ...slog.Attr) []byte { formatLog := func(prefix, text string, err error, fields ...mlog.Pair) []byte {
var b bytes.Buffer var b bytes.Buffer
fmt.Fprint(&b, prefix) fmt.Fprint(&b, prefix)
fmt.Fprint(&b, text) fmt.Fprint(&b, text)
if err != nil { if err != nil {
fmt.Fprint(&b, ": "+err.Error()) fmt.Fprint(&b, ": "+err.Error())
} }
for _, a := range attrs { for _, f := range fields {
fmt.Fprintf(&b, "; %s=%v", a.Key, a.Value) fmt.Fprintf(&b, "; %s=%v", f.Key, f.Value)
} }
fmt.Fprint(&b, "\n") fmt.Fprint(&b, "\n")
return b.Bytes() return b.Bytes()
} }
// Log an error to both the mox service as the user running "mox backup". // Log an error to both the mox service as the user running "mox backup".
pkglogx := func(prefix, text string, err error, attrs ...slog.Attr) { xlogx := func(prefix, text string, err error, fields ...mlog.Pair) {
xctl.log.Errorx(text, err, attrs...) ctl.log.Errorx(text, err, fields...)
xwriter.Write(formatLog(prefix, text, err, attrs...))
_, werr := writer.Write(formatLog(prefix, text, err, fields...))
ctl.xcheck(werr, "write to ctl")
} }
// Log an error but don't mark backup as failed. // Log an error but don't mark backup as failed.
xwarnx := func(text string, err error, attrs ...slog.Attr) { xwarnx := func(text string, err error, fields ...mlog.Pair) {
pkglogx("warning: ", text, err, attrs...) xlogx("warning: ", text, err, fields...)
} }
// Log an error that causes the backup to be marked as failed. We typically // Log an error that causes the backup to be marked as failed. We typically
// continue processing though. // continue processing though.
xerrx := func(text string, err error, attrs ...slog.Attr) { xerrx := func(text string, err error, fields ...mlog.Pair) {
incomplete = true incomplete = true
pkglogx("error: ", text, err, attrs...) xlogx("error: ", text, err, fields...)
} }
// If verbose is enabled, log to the cli command. Always log as info level. // If verbose is enabled, log to the cli command. Always log as info level.
xvlog := func(text string, attrs ...slog.Attr) { xvlog := func(text string, fields ...mlog.Pair) {
xctl.log.Info(text, attrs...) ctl.log.Info(text, fields...)
if verbose { if verbose {
xwriter.Write(formatLog("", text, nil, attrs...)) _, werr := writer.Write(formatLog("", text, nil, fields...))
ctl.xcheck(werr, "write to ctl")
} }
} }
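For illustration only (not part of the change): the helpers above prefix a line, append the error text, and render each slog attribute as "; key=value". A minimal standalone rendition of that layout, with made-up values:

	package main

	import (
		"bytes"
		"fmt"
		"log/slog"
	)

	// formatLine is a hypothetical standalone version of formatLog above,
	// shown only to make the output format concrete.
	func formatLine(prefix, text string, err error, attrs ...slog.Attr) string {
		var b bytes.Buffer
		fmt.Fprint(&b, prefix, text)
		if err != nil {
			fmt.Fprint(&b, ": "+err.Error())
		}
		for _, a := range attrs {
			fmt.Fprintf(&b, "; %s=%v", a.Key, a.Value)
		}
		return b.String()
	}

	func main() {
		fmt.Println(formatLine("error: ", "open source file (not backed up)",
			fmt.Errorf("permission denied"),
			slog.String("srcpath", "/data/queue/1"),
			slog.String("dstpath", "/backup/queue/1")))
		// error: open source file (not backed up): permission denied; srcpath=/data/queue/1; dstpath=/backup/queue/1
	}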
dstConfigDir := filepath.Join(dstDir, "config")
dstDataDir := filepath.Join(dstDir, "data")
// Warn if directories already exist, will likely cause failures when trying to
// write files that already exist.
if _, err := os.Stat(dstConfigDir); err == nil {
xwarnx("destination config directory already exists", nil, slog.String("configdir", dstConfigDir))
}
if _, err := os.Stat(dstDataDir); err == nil { if _, err := os.Stat(dstDataDir); err == nil {
xwarnx("destination data directory already exists", nil, slog.String("datadir", dstDataDir)) xwarnx("destination data directory already exists", nil, mlog.Field("dir", dstDataDir))
}
os.MkdirAll(dstDir, 0770)
os.MkdirAll(dstConfigDir, 0770)
os.MkdirAll(dstDataDir, 0770)
// Copy all files in the config dir.
srcConfigDir := filepath.Clean(mox.ConfigDirPath("."))
err := filepath.WalkDir(srcConfigDir, func(srcPath string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if srcConfigDir == srcPath {
return nil
}
// Trim directory and separator.
relPath := srcPath[len(srcConfigDir)+1:]
destPath := filepath.Join(dstConfigDir, relPath)
if d.IsDir() {
if info, err := os.Stat(srcPath); err != nil {
return fmt.Errorf("stat config dir %s: %v", srcPath, err)
} else if err := os.Mkdir(destPath, info.Mode()&0777); err != nil {
return fmt.Errorf("mkdir %s: %v", destPath, err)
}
return nil
}
if d.Type()&fs.ModeSymlink != 0 {
linkDest, err := os.Readlink(srcPath)
if err != nil {
return fmt.Errorf("reading symlink %s: %v", srcPath, err)
}
if err := os.Symlink(linkDest, destPath); err != nil {
return fmt.Errorf("creating symlink %s: %v", destPath, err)
}
return nil
}
if !d.Type().IsRegular() {
xwarnx("skipping non-regular/dir/symlink file in config dir", nil, slog.String("path", srcPath))
return nil
}
sf, err := os.Open(srcPath)
if err != nil {
return fmt.Errorf("open config file %s: %v", srcPath, err)
}
info, err := sf.Stat()
if err != nil {
return fmt.Errorf("stat config file %s: %v", srcPath, err)
}
df, err := os.OpenFile(destPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0777&info.Mode())
if err != nil {
return fmt.Errorf("create destination config file %s: %v", destPath, err)
}
defer func() {
if df != nil {
err := df.Close()
xctl.log.Check(err, "closing file")
}
}()
defer func() {
err := sf.Close()
xctl.log.Check(err, "closing file")
}()
if _, err := io.Copy(df, sf); err != nil {
return fmt.Errorf("copying config file %s to %s: %v", srcPath, destPath, err)
}
if err := df.Close(); err != nil {
return fmt.Errorf("closing destination config file %s: %v", srcPath, err)
}
df = nil
return nil
})
if err != nil {
xerrx("storing config directory", err)
} }
srcDataDir := filepath.Clean(mox.DataDirPath(".")) srcDataDir := filepath.Clean(mox.DataDirPath("."))
@ -205,37 +119,33 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
sf, err := os.Open(srcpath) sf, err := os.Open(srcpath)
if err != nil { if err != nil {
xerrx("open source file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("open source file (not backed up)", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
return return
} }
defer func() { defer sf.Close()
err := sf.Close()
xctl.log.Check(err, "closing source file")
}()
ensureDestDir(dstpath) ensureDestDir(dstpath)
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
if err != nil { if err != nil {
xerrx("creating destination file (not backed up)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("creating destination file (not backed up)", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
return return
} }
defer func() { defer func() {
if df != nil { if df != nil {
err := df.Close() df.Close()
xctl.log.Check(err, "closing destination file")
} }
}() }()
if _, err := io.Copy(df, sf); err != nil { if _, err := io.Copy(df, sf); err != nil {
xerrx("copying file (not backed up properly)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("copying file (not backed up properly)", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
return return
} }
err = df.Close() err = df.Close()
df = nil df = nil
if err != nil { if err != nil {
xerrx("closing destination file (not backed up properly)", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("closing destination file (not backed up properly)", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
return return
} }
xvlog("backed up file", slog.String("path", path), slog.Duration("duration", time.Since(tmFile))) xvlog("backed up file", mlog.Field("path", path), mlog.Field("duration", time.Since(tmFile)))
} }
// Back up the files in a directory (by copying). // Back up the files in a directory (by copying).
@ -245,7 +155,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
dstdir := filepath.Join(dstDataDir, dir) dstdir := filepath.Join(dstDataDir, dir)
err := filepath.WalkDir(srcdir, func(srcpath string, d fs.DirEntry, err error) error { err := filepath.WalkDir(srcdir, func(srcpath string, d fs.DirEntry, err error) error {
if err != nil { if err != nil {
xerrx("walking file (not backed up)", err, slog.String("srcpath", srcpath)) xerrx("walking file (not backed up)", err, mlog.Field("srcpath", srcpath))
return nil return nil
} }
if d.IsDir() { if d.IsDir() {
@ -255,18 +165,24 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("copying directory (not backed up properly)", err, xerrx("copying directory (not backed up properly)", err, mlog.Field("srcdir", srcdir), mlog.Field("dstdir", dstdir), mlog.Field("duration", time.Since(tmDir)))
slog.String("srcdir", srcdir),
slog.String("dstdir", dstdir),
slog.Duration("duration", time.Since(tmDir)))
return return
} }
xvlog("backed up directory", slog.String("dir", dir), slog.Duration("duration", time.Since(tmDir))) xvlog("backed up directory", mlog.Field("dir", dir), mlog.Field("duration", time.Since(tmDir)))
} }
// Backup a database by copying it in a readonly transaction. Wrapped by backupDB // Backup a database by copying it in a readonly transaction.
// which logs and returns just a bool. // Always logs on error, so caller doesn't have to, but also returns the error so
backupDB0 := func(db *bstore.DB, path string) error { // callers can see result.
backupDB := func(db *bstore.DB, path string) (rerr error) {
defer func() {
if rerr != nil {
xerrx("backing up database", rerr, mlog.Field("path", path))
}
}()
tmDB := time.Now()
dstpath := filepath.Join(dstDataDir, path) dstpath := filepath.Join(dstDataDir, path)
ensureDestDir(dstpath) ensureDestDir(dstpath)
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
@ -275,8 +191,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
} }
defer func() { defer func() {
if df != nil { if df != nil {
err := df.Close() df.Close()
xctl.log.Check(err, "closing destination database file")
} }
}() }()
err = db.Read(ctx, func(tx *bstore.Tx) error { err = db.Read(ctx, func(tx *bstore.Tx) error {
@ -301,20 +216,10 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if err != nil { if err != nil {
return fmt.Errorf("closing destination database after copy: %v", err) return fmt.Errorf("closing destination database after copy: %v", err)
} }
xvlog("backed up database file", mlog.Field("path", path), mlog.Field("duration", time.Since(tmDB)))
return nil return nil
} }
backupDB := func(db *bstore.DB, path string) bool {
start := time.Now()
err := backupDB0(db, path)
if err != nil {
xerrx("backing up database", err, slog.String("path", path), slog.Duration("duration", time.Since(start)))
return false
}
xvlog("backed up database file", slog.String("path", path), slog.Duration("duration", time.Since(start)))
return true
}
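The comment above describes the pattern: run a fallible step, time it, log an error or a verbose success line, and hand callers only a bool. A generic, hypothetical sketch of such a wrapper (names and log calls are illustrative, not mox code):

	package main

	import (
		"fmt"
		"log/slog"
		"os"
		"time"
	)

	// timed wraps a fallible step: run it, measure its duration, log failure or
	// success, and return whether it succeeded. Hypothetical helper in the spirit
	// of the backupDB wrapper above.
	func timed(log *slog.Logger, name string, fn func() error) bool {
		start := time.Now()
		if err := fn(); err != nil {
			log.Error(name+" failed", slog.Any("err", err), slog.Duration("duration", time.Since(start)))
			return false
		}
		log.Info(name+" done", slog.Duration("duration", time.Since(start)))
		return true
	}

	func main() {
		log := slog.New(slog.NewTextHandler(os.Stderr, nil))
		ok := timed(log, "backup auth.db", func() error { return nil })
		fmt.Println(ok)
	}

Keeping error handling and timing inside the wrapper is what lets the call sites below stay single-line.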
// Try to create a hardlink. Fall back to copying the file (e.g. when on different file system). // Try to create a hardlink. Fall back to copying the file (e.g. when on different file system).
warnedHardlink := false // We warn once about failing to hardlink. warnedHardlink := false // We warn once about failing to hardlink.
linkOrCopy := func(srcpath, dstpath string) (bool, error) { linkOrCopy := func(srcpath, dstpath string) (bool, error) {
@ -326,11 +231,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// No point in trying with regular copy, we would warn twice. // No point in trying with regular copy, we would warn twice.
return false, err return false, err
} else if !warnedHardlink { } else if !warnedHardlink {
var hardlinkHint string xwarnx("creating hardlink to message", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
if runtime.GOOS == "linux" && errors.Is(err, syscall.EXDEV) {
hardlinkHint = " (hint: if running under systemd, ReadWritePaths in mox.service may cause multiple mountpoints; consider merging paths into a single parent directory to prevent cross-device/mountpoint hardlinks)"
}
xwarnx("creating hardlink to message failed, will be doing regular file copies and not warn again"+hardlinkHint, err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath))
warnedHardlink = true warnedHardlink = true
} }
@ -339,19 +240,15 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if err != nil { if err != nil {
return false, fmt.Errorf("open source path %s: %v", srcpath, err) return false, fmt.Errorf("open source path %s: %v", srcpath, err)
} }
defer func() { defer sf.Close()
err := sf.Close()
xctl.log.Check(err, "closing copied source file")
}()
df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660) df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
if err != nil { if err != nil {
return false, fmt.Errorf("create destination path %s: %v", dstpath, err) return false, fmt.Errorf("open destination path %s: %v", dstpath, err)
} }
defer func() { defer func() {
if df != nil { if df != nil {
err := df.Close() df.Close()
xctl.log.Check(err, "closing partial destination file")
} }
}() }()
if _, err := io.Copy(df, sf); err != nil { if _, err := io.Copy(df, sf); err != nil {
@ -360,7 +257,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
err = df.Close() err = df.Close()
df = nil df = nil
if err != nil { if err != nil {
return false, fmt.Errorf("closing destination file: %v", err) return false, fmt.Errorf("close: %v", err)
} }
return false, nil return false, nil
} }
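As a standalone illustration of the link-or-copy approach implemented above: try a hardlink first (cheap, but only within one file system), and fall back to copying bytes when linking fails. This is a simplified, hypothetical helper, not the mox code; it omits the one-time warning and the not-exist shortcut.

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	// linkOrCopyFile tries a hardlink and falls back to a byte copy, e.g. when
	// source and destination are on different file systems. Illustrative sketch only.
	func linkOrCopyFile(srcpath, dstpath string) (linked bool, err error) {
		if err := os.Link(srcpath, dstpath); err == nil {
			return true, nil
		}
		sf, err := os.Open(srcpath)
		if err != nil {
			return false, fmt.Errorf("open source: %v", err)
		}
		defer sf.Close()
		df, err := os.OpenFile(dstpath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0660)
		if err != nil {
			return false, fmt.Errorf("create destination: %v", err)
		}
		if _, err := io.Copy(df, sf); err != nil {
			df.Close()
			return false, fmt.Errorf("copy: %v", err)
		}
		return false, df.Close()
	}

	func main() {
		linked, err := linkOrCopyFile("src.txt", "dst.txt")
		fmt.Println(linked, err)
	}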
@ -368,28 +265,26 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// Start making the backup. // Start making the backup.
tmStart := time.Now() tmStart := time.Now()
xctl.log.Print("making backup", slog.String("destdir", dstDataDir)) ctl.log.Print("making backup", mlog.Field("destdir", dstDataDir))
if err := os.MkdirAll(dstDataDir, 0770); err != nil { err := os.MkdirAll(dstDataDir, 0770)
if err != nil {
xerrx("creating destination data directory", err) xerrx("creating destination data directory", err)
} }
if err := os.WriteFile(filepath.Join(dstDataDir, "moxversion"), []byte(moxvar.Version), 0660); err != nil { if err := os.WriteFile(filepath.Join(dstDataDir, "moxversion"), []byte(moxvar.Version), 0660); err != nil {
xerrx("writing moxversion", err) xerrx("writing moxversion", err)
} }
backupDB(store.AuthDB, "auth.db") backupDB(dmarcdb.DB, "dmarcrpt.db")
backupDB(dmarcdb.ReportsDB, "dmarcrpt.db")
backupDB(dmarcdb.EvalDB, "dmarceval.db")
backupDB(mtastsdb.DB, "mtasts.db") backupDB(mtastsdb.DB, "mtasts.db")
backupDB(tlsrptdb.ReportDB, "tlsrpt.db") backupDB(tlsrptdb.DB, "tlsrpt.db")
backupDB(tlsrptdb.ResultDB, "tlsrptresult.db")
backupFile("receivedid.key") backupFile("receivedid.key")
// Acme directory is optional. // Acme directory is optional.
srcAcmeDir := filepath.Join(srcDataDir, "acme") srcAcmeDir := filepath.Join(srcDataDir, "acme")
if _, err := os.Stat(srcAcmeDir); err == nil { if _, err := os.Stat(srcAcmeDir); err == nil {
backupDir("acme") backupDir("acme")
} else if !os.IsNotExist(err) { } else if err != nil && !os.IsNotExist(err) {
xerrx("copying acme/", err) xerrx("copying acme/", err)
} }
@ -397,41 +292,38 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
backupQueue := func(path string) { backupQueue := func(path string) {
tmQueue := time.Now() tmQueue := time.Now()
if !backupDB(queue.DB, path) { if err := backupDB(queue.DB, path); err != nil {
xerrx("queue not backed up", err, mlog.Field("path", path), mlog.Field("duration", time.Since(tmQueue)))
return return
} }
dstdbpath := filepath.Join(dstDataDir, path) dstdbpath := filepath.Join(dstDataDir, path)
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger} db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, queue.DBTypes...)
db, err := bstore.Open(ctx, dstdbpath, &opts, queue.DBTypes...)
if err != nil { if err != nil {
xerrx("open copied queue database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmQueue))) xerrx("open copied queue database", err, mlog.Field("dstpath", dstdbpath), mlog.Field("duration", time.Since(tmQueue)))
return return
} }
defer func() { defer func() {
if db != nil { if db != nil {
err := db.Close() err := db.Close()
xctl.log.Check(err, "closing new queue db") ctl.log.Check(err, "closing new queue db")
} }
}() }()
// Link/copy known message files. If a message has been removed while we read the // Link/copy known message files. Warn if files are missing or unexpected
// database, our backup is not consistent and the backup will be marked failed. // (though a message file could have been removed just now due to delivery, or a
// new message may have been queued).
tmMsgs := time.Now() tmMsgs := time.Now()
seen := map[string]struct{}{} seen := map[string]struct{}{}
var nlinked, ncopied int var nlinked, ncopied int
var maxID int64
err = bstore.QueryDB[queue.Msg](ctx, db).ForEach(func(m queue.Msg) error { err = bstore.QueryDB[queue.Msg](ctx, db).ForEach(func(m queue.Msg) error {
if m.ID > maxID {
maxID = m.ID
}
mp := store.MessagePath(m.ID) mp := store.MessagePath(m.ID)
seen[mp] = struct{}{} seen[mp] = struct{}{}
srcpath := filepath.Join(srcDataDir, "queue", mp) srcpath := filepath.Join(srcDataDir, "queue", mp)
dstpath := filepath.Join(dstDataDir, "queue", mp) dstpath := filepath.Join(dstDataDir, "queue", mp)
if linked, err := linkOrCopy(srcpath, dstpath); err != nil { if linked, err := linkOrCopy(srcpath, dstpath); err != nil {
xerrx("linking/copying queue message", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("linking/copying queue message", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
} else if linked { } else if linked {
nlinked++ nlinked++
} else { } else {
@ -440,22 +332,17 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("processing queue messages (not backed up properly)", err, slog.Duration("duration", time.Since(tmMsgs))) xerrx("processing queue messages (not backed up properly)", err, mlog.Field("duration", time.Since(tmMsgs)))
} else { } else {
xvlog("queue message files linked/copied", xvlog("queue message files linked/copied", mlog.Field("linked", nlinked), mlog.Field("copied", ncopied), mlog.Field("duration", time.Since(tmMsgs)))
slog.Int("linked", nlinked),
slog.Int("copied", ncopied),
slog.Duration("duration", time.Since(tmMsgs)))
} }
// Read through all files in queue directory and warn about anything we haven't // Read through all files in queue directory and warn about anything we haven't handled yet.
// handled yet. Message files that are newer than we expect from our consistent
// database snapshot are ignored.
tmWalk := time.Now() tmWalk := time.Now()
srcqdir := filepath.Join(srcDataDir, "queue") srcqdir := filepath.Join(srcDataDir, "queue")
err = filepath.WalkDir(srcqdir, func(srcqpath string, d fs.DirEntry, err error) error { err = filepath.WalkDir(srcqdir, func(srcqpath string, d fs.DirEntry, err error) error {
if err != nil { if err != nil {
xerrx("walking files in queue", err, slog.String("srcpath", srcqpath)) xerrx("walking files in queue", err, mlog.Field("srcpath", srcqpath))
return nil return nil
} }
if d.IsDir() { if d.IsDir() {
@ -468,43 +355,37 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if p == "index.db" { if p == "index.db" {
return nil return nil
} }
// Skip any messages that were added since we started on our consistent snapshot.
// We don't want to cause spurious backup warnings.
if id, err := strconv.ParseInt(filepath.Base(p), 10, 64); err == nil && maxID > 0 && id > maxID && p == store.MessagePath(id) {
return nil
}
qp := filepath.Join("queue", p) qp := filepath.Join("queue", p)
xwarnx("backing up unrecognized file in queue directory", nil, slog.String("path", qp)) xwarnx("backing up unrecognized file in queue directory", nil, mlog.Field("path", qp))
backupFile(qp) backupFile(qp)
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("walking queue directory (not backed up properly)", err, slog.String("dir", "queue"), slog.Duration("duration", time.Since(tmWalk))) xerrx("walking queue directory (not backed up properly)", err, mlog.Field("dir", "queue"), mlog.Field("duration", time.Since(tmWalk)))
} else { } else {
xvlog("walked queue directory", slog.Duration("duration", time.Since(tmWalk))) xvlog("walked queue directory", mlog.Field("duration", time.Since(tmWalk)))
} }
xvlog("queue backed finished", slog.Duration("duration", time.Since(tmQueue))) xvlog("queue backed finished", mlog.Field("duration", time.Since(tmQueue)))
} }
backupQueue(filepath.FromSlash("queue/index.db")) backupQueue("queue/index.db")
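The snapshot-consistency rule used above, in isolation: queue message files take their base name from the database ID, so a file whose numeric name exceeds the largest ID seen in the copied database must have been enqueued after the snapshot, and is skipped rather than warned about. A hypothetical sketch (it omits the additional path check the real code performs):

	package main

	import (
		"fmt"
		"strconv"
	)

	// newerThanSnapshot reports whether a file named after a message ID
	// (e.g. "124") is newer than the copied database snapshot.
	func newerThanSnapshot(base string, maxID int64) bool {
		id, err := strconv.ParseInt(base, 10, 64)
		return err == nil && maxID > 0 && id > maxID
	}

	func main() {
		fmt.Println(newerThanSnapshot("124", 123)) // true: added after snapshot, ignore
		fmt.Println(newerThanSnapshot("50", 123))  // false: unexpected file, warn and back up
	}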
backupAccount := func(acc *store.Account) { backupAccount := func(acc *store.Account) {
defer func() { defer acc.Close()
err := acc.Close()
xctl.log.Check(err, "closing account")
}()
tmAccount := time.Now() tmAccount := time.Now()
// Copy database file. // Copy database file.
dbpath := filepath.Join("accounts", acc.Name, "index.db") dbpath := filepath.Join("accounts", acc.Name, "index.db")
backupDB(acc.DB, dbpath) err := backupDB(acc.DB, dbpath)
if err != nil {
xerrx("copying account database", err, mlog.Field("path", dbpath), mlog.Field("duration", time.Since(tmAccount)))
}
// todo: should document/check not taking a rlock on account. // todo: should document/check not taking a rlock on account.
// Copy junkfilter files, if configured. // Copy junkfilter files, if configured.
if jf, _, err := acc.OpenJunkFilter(ctx, xctl.log); err != nil { if jf, _, err := acc.OpenJunkFilter(ctx, ctl.log); err != nil {
if !errors.Is(err, store.ErrNoJunkFilter) { if !errors.Is(err, store.ErrNoJunkFilter) {
xerrx("opening junk filter for account (not backed up)", err) xerrx("opening junk filter for account (not backed up)", err)
} }
@ -514,41 +395,39 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
backupDB(db, jfpath) backupDB(db, jfpath)
bloompath := filepath.Join("accounts", acc.Name, "junkfilter.bloom") bloompath := filepath.Join("accounts", acc.Name, "junkfilter.bloom")
backupFile(bloompath) backupFile(bloompath)
db = nil
err := jf.Close() err := jf.Close()
xctl.log.Check(err, "closing junkfilter") ctl.log.Check(err, "closing junkfilter")
} }
dstdbpath := filepath.Join(dstDataDir, dbpath) dstdbpath := filepath.Join(dstDataDir, dbpath)
opts := bstore.Options{MustExist: true, RegisterLogger: xctl.log.Logger} db, err := bstore.Open(ctx, dstdbpath, &bstore.Options{MustExist: true}, store.DBTypes...)
db, err := bstore.Open(ctx, dstdbpath, &opts, store.DBTypes...)
if err != nil { if err != nil {
xerrx("open copied account database", err, slog.String("dstpath", dstdbpath), slog.Duration("duration", time.Since(tmAccount))) xerrx("open copied account database", err, mlog.Field("dstpath", dstdbpath), mlog.Field("duration", time.Since(tmAccount)))
return return
} }
defer func() { defer func() {
if db != nil { if db != nil {
err := db.Close() err := db.Close()
xctl.log.Check(err, "close account database") ctl.log.Check(err, "close account database")
} }
}() }()
// Link/copy known message files. // Link/copy known message files. Warn if files are missing or unexpected (though a
// message file could have been added just now due to delivery, or a message have
// been removed).
tmMsgs := time.Now() tmMsgs := time.Now()
seen := map[string]struct{}{} seen := map[string]struct{}{}
var maxID int64
var nlinked, ncopied int var nlinked, ncopied int
err = bstore.QueryDB[store.Message](ctx, db).FilterEqual("Expunged", false).ForEach(func(m store.Message) error { err = bstore.QueryDB[store.Message](ctx, db).ForEach(func(m store.Message) error {
if m.ID > maxID {
maxID = m.ID
}
mp := store.MessagePath(m.ID) mp := store.MessagePath(m.ID)
seen[mp] = struct{}{} seen[mp] = struct{}{}
amp := filepath.Join("accounts", acc.Name, "msg", mp) amp := filepath.Join("accounts", acc.Name, "msg", mp)
srcpath := filepath.Join(srcDataDir, amp) srcpath := filepath.Join(srcDataDir, amp)
dstpath := filepath.Join(dstDataDir, amp) dstpath := filepath.Join(dstDataDir, amp)
if linked, err := linkOrCopy(srcpath, dstpath); err != nil { if linked, err := linkOrCopy(srcpath, dstpath); err != nil {
xerrx("linking/copying account message", err, slog.String("srcpath", srcpath), slog.String("dstpath", dstpath)) xerrx("linking/copying account message", err, mlog.Field("srcpath", srcpath), mlog.Field("dstpath", dstpath))
} else if linked { } else if linked {
nlinked++ nlinked++
} else { } else {
@ -557,31 +436,17 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("processing account messages (not backed up properly)", err, slog.Duration("duration", time.Since(tmMsgs))) xerrx("processing account messages (not backed up properly)", err, mlog.Field("duration", time.Since(tmMsgs)))
} else { } else {
xvlog("account message files linked/copied", xvlog("account message files linked/copied", mlog.Field("linked", nlinked), mlog.Field("copied", ncopied), mlog.Field("duration", time.Since(tmMsgs)))
slog.Int("linked", nlinked),
slog.Int("copied", ncopied),
slog.Duration("duration", time.Since(tmMsgs)))
} }
eraseIDs := map[int64]struct{}{} // Read through all files in account directory and warn about anything we haven't handled yet.
err = bstore.QueryDB[store.MessageErase](ctx, db).ForEach(func(me store.MessageErase) error {
eraseIDs[me.ID] = struct{}{}
return nil
})
if err != nil {
xerrx("listing erased messages", err)
}
// Read through all files in queue directory and warn about anything we haven't
// handled yet. Message files that are newer than we expect from our consistent
// database snapshot are ignored.
tmWalk := time.Now() tmWalk := time.Now()
srcadir := filepath.Join(srcDataDir, "accounts", acc.Name) srcadir := filepath.Join(srcDataDir, "accounts", acc.Name)
err = filepath.WalkDir(srcadir, func(srcapath string, d fs.DirEntry, err error) error { err = filepath.WalkDir(srcadir, func(srcapath string, d fs.DirEntry, err error) error {
if err != nil { if err != nil {
xerrx("walking files in account", err, slog.String("srcpath", srcapath)) xerrx("walking files in account", err, mlog.Field("srcpath", srcapath))
return nil return nil
} }
if d.IsDir() { if d.IsDir() {
@ -594,37 +459,27 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
if _, ok := seen[mp]; ok { if _, ok := seen[mp]; ok {
return nil return nil
} }
// Skip any messages that were added since we started on our consistent snapshot,
// or messages that will be erased. We don't want to cause spurious backup
// warnings.
id, err := strconv.ParseInt(l[len(l)-1], 10, 64)
if err == nil && id > maxID && mp == store.MessagePath(id) {
return nil
} else if _, ok := eraseIDs[id]; err == nil && ok {
return nil
}
} }
switch p { switch p {
case "index.db", "junkfilter.db", "junkfilter.bloom": case "index.db", "junkfilter.db", "junkfilter.bloom":
return nil return nil
} }
ap := filepath.Join("accounts", acc.Name, p) ap := filepath.Join("accounts", acc.Name, p)
if strings.HasPrefix(p, "msg"+string(filepath.Separator)) { if strings.HasPrefix(p, "msg/") {
xwarnx("backing up unrecognized file in account message directory (should be moved away)", nil, slog.String("path", ap)) xwarnx("backing up unrecognized file in account message directory (should be moved away)", nil, mlog.Field("path", ap))
} else { } else {
xwarnx("backing up unrecognized file in account directory", nil, slog.String("path", ap)) xwarnx("backing up unrecognized file in account directory", nil, mlog.Field("path", ap))
} }
backupFile(ap) backupFile(ap)
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("walking account directory (not backed up properly)", err, slog.String("srcdir", srcadir), slog.Duration("duration", time.Since(tmWalk))) xerrx("walking account directory (not backed up properly)", err, mlog.Field("srcdir", srcadir), mlog.Field("duration", time.Since(tmWalk)))
} else { } else {
xvlog("walked account directory", slog.Duration("duration", time.Since(tmWalk))) xvlog("walked account directory", mlog.Field("duration", time.Since(tmWalk)))
} }
xvlog("account backup finished", slog.String("dir", filepath.Join("accounts", acc.Name)), slog.Duration("duration", time.Since(tmAccount))) xvlog("account backup finished", mlog.Field("dir", filepath.Join("accounts", acc.Name)), mlog.Field("duration", time.Since(tmAccount)))
} }
// For each configured account, open it, make a copy of the database and // For each configured account, open it, make a copy of the database and
@ -632,9 +487,9 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
// account directories when handling "all other files" below. // account directories when handling "all other files" below.
accounts := map[string]struct{}{} accounts := map[string]struct{}{}
for _, accName := range mox.Conf.Accounts() { for _, accName := range mox.Conf.Accounts() {
acc, err := store.OpenAccount(xctl.log, accName, false) acc, err := store.OpenAccount(accName)
if err != nil { if err != nil {
xerrx("opening account for copying (will try to copy as regular files later)", err, slog.String("account", accName)) xerrx("opening account for copying (will try to copy as regular files later)", err, mlog.Field("account", accName))
continue continue
} }
accounts[accName] = struct{}{} accounts[accName] = struct{}{}
@ -645,7 +500,7 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
tmWalk := time.Now() tmWalk := time.Now()
err = filepath.WalkDir(srcDataDir, func(srcpath string, d fs.DirEntry, err error) error { err = filepath.WalkDir(srcDataDir, func(srcpath string, d fs.DirEntry, err error) error {
if err != nil { if err != nil {
xerrx("walking path", err, slog.String("path", srcpath)) xerrx("walking path", err, mlog.Field("path", srcpath))
return nil return nil
} }
@ -670,29 +525,29 @@ func xbackupctl(ctx context.Context, xctl *ctl) {
} }
switch p { switch p {
case "auth.db", "dmarcrpt.db", "dmarceval.db", "mtasts.db", "tlsrpt.db", "tlsrptresult.db", "receivedid.key", "ctl": case "dmarcrpt.db", "mtasts.db", "tlsrpt.db", "receivedid.key", "ctl":
// Already handled. // Already handled.
return nil return nil
case "lastknownversion": // Optional file, not yet handled. case "lastknownversion": // Optional file, not yet handled.
default: default:
xwarnx("backing up unrecognized file", nil, slog.String("path", p)) xwarnx("backing up unrecognized file", nil, mlog.Field("path", p))
} }
backupFile(p) backupFile(p)
return nil return nil
}) })
if err != nil { if err != nil {
xerrx("walking other files (not backed up properly)", err, slog.Duration("duration", time.Since(tmWalk))) xerrx("walking other files (not backed up properly)", err, mlog.Field("duration", time.Since(tmWalk)))
} else { } else {
xvlog("walking other files finished", slog.Duration("duration", time.Since(tmWalk))) xvlog("walking other files finished", mlog.Field("duration", time.Since(tmWalk)))
} }
xvlog("backup finished", slog.Duration("duration", time.Since(tmStart))) xvlog("backup finished", mlog.Field("duration", time.Since(tmStart)))
xwriter.xclose() writer.xclose()
if incomplete { if incomplete {
xctl.xwrite("errors were encountered during backup") ctl.xwrite("errors were encountered during backup")
} else { } else {
xctl.xwriteok() ctl.xwriteok()
} }
} }

2
checkhtmljs Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
exec ./node_modules/.bin/jshint --extract always $@ | fixjshintlines


@ -5,7 +5,6 @@ import (
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"net" "net"
"net/http"
"net/url" "net/url"
"reflect" "reflect"
"regexp" "regexp"
@ -20,10 +19,6 @@ import (
// todo: better default values, so less has to be specified in the config file. // todo: better default values, so less has to be specified in the config file.
// DefaultMaxMsgSize is the maximum message size for incoming and outgoing
// messages, in bytes. Can be overridden per listener.
const DefaultMaxMsgSize = 100 * 1024 * 1024
// Port returns port if non-zero, and fallback otherwise. // Port returns port if non-zero, and fallback otherwise.
func Port(port, fallback int) int { func Port(port, fallback int) int {
if port == 0 { if port == 0 {
@ -35,14 +30,14 @@ func Port(port, fallback int) int {
// Static is a parsed form of the mox.conf configuration file, before converting it // Static is a parsed form of the mox.conf configuration file, before converting it
// into a mox.Config after additional processing. // into a mox.Config after additional processing.
type Static struct { type Static struct {
DataDir string `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDirectory where all data is stored, e.g. queue, accounts and messages, ACME TLS certs/keys. If this is a relative path, it is relative to the directory of mox.conf."` DataDir string `sconf-doc:"Directory where all data is stored, e.g. queue, accounts and messages, ACME TLS certs/keys. If this is a relative path, it is relative to the directory of mox.conf."`
LogLevel string `sconf-doc:"Default log level, one of: error, info, debug, trace, traceauth, tracedata. Trace logs SMTP and IMAP protocol transcripts, with traceauth also messages with passwords, and tracedata on top of that also the full data exchanges (full messages), which can be a large amount of data."` LogLevel string `sconf-doc:"Default log level, one of: error, info, debug, trace, traceauth, tracedata. Trace logs SMTP and IMAP protocol transcripts, with traceauth also messages with passwords, and tracedata on top of that also the full data exchanges (full messages), which can be a large amount of data."`
PackageLogLevels map[string]string `sconf:"optional" sconf-doc:"Overrides of log level per package (e.g. queue, smtpclient, smtpserver, imapserver, spf, dkim, dmarc, dmarcdb, autotls, junk, mtasts, tlsrpt)."` PackageLogLevels map[string]string `sconf:"optional" sconf-doc:"Overrides of log level per package (e.g. queue, smtpclient, smtpserver, imapserver, spf, dkim, dmarc, dmarcdb, autotls, junk, mtasts, tlsrpt)."`
User string `sconf:"optional" sconf-doc:"User to switch to after binding to all sockets as root. Default: mox. If the value is not a known user, it is parsed as integer and used as uid and gid."` User string `sconf:"optional" sconf-doc:"User to switch to after binding to all sockets as root. Default: mox. If the value is not a known user, it is parsed as integer and used as uid and gid."`
NoFixPermissions bool `sconf:"optional" sconf-doc:"If true, do not automatically fix file permissions when starting up. By default, mox will ensure reasonable owner/permissions on the working, data and config directories (and files), and mox binary (if present)."` NoFixPermissions bool `sconf:"optional" sconf-doc:"If true, do not automatically fix file permissions when starting up. By default, mox will ensure reasonable owner/permissions on the working, data and config directories (and files), and mox binary (if present)."`
Hostname string `sconf-doc:"Full hostname of system, e.g. mail.<domain>"` Hostname string `sconf-doc:"Full hostname of system, e.g. mail.<domain>"`
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Parsed form of hostname. HostnameDomain dns.Domain `sconf:"-" json:"-"` // Parsed form of hostname.
CheckUpdates bool `sconf:"optional" sconf-doc:"If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to check for a new release. Each time a new release is found, a changelog is fetched from https://updates.xmox.nl/changelog and delivered to the postmaster mailbox."` CheckUpdates bool `sconf:"optional" sconf-doc:"If enabled, a single DNS TXT lookup of _updates.xmox.nl is done every 24h to check for a new release. Each time a new release is found, a changelog is fetched from https://updates.xmox.nl and delivered to the postmaster mailbox."`
Pedantic bool `sconf:"optional" sconf-doc:"In pedantic mode protocol violations (that happen in the wild) for SMTP/IMAP/etc result in errors instead of accepting such behaviour."` Pedantic bool `sconf:"optional" sconf-doc:"In pedantic mode protocol violations (that happen in the wild) for SMTP/IMAP/etc result in errors instead of accepting such behaviour."`
TLS struct { TLS struct {
CA *struct { CA *struct {
@ -58,23 +53,9 @@ type Static struct {
Account string Account string
Mailbox string `sconf-doc:"E.g. Postmaster or Inbox."` Mailbox string `sconf-doc:"E.g. Postmaster or Inbox."`
} `sconf-doc:"Destination for emails delivered to postmaster addresses: a plain 'postmaster' without domain, 'postmaster@<hostname>' (also for each listener with SMTP enabled), and as fallback for each domain without explicitly configured postmaster destination."` } `sconf-doc:"Destination for emails delivered to postmaster addresses: a plain 'postmaster' without domain, 'postmaster@<hostname>' (also for each listener with SMTP enabled), and as fallback for each domain without explicitly configured postmaster destination."`
HostTLSRPT struct { DefaultMailboxes []string `sconf:"optional" sconf-doc:"Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
Account string `sconf-doc:"Account to deliver TLS reports to. Typically same account as for postmaster."`
Mailbox string `sconf-doc:"Mailbox to deliver TLS reports to. Recommended value: TLSRPT."`
Localpart string `sconf-doc:"Localpart at hostname to accept TLS reports at. Recommended value: tlsreports."`
ParsedLocalpart smtp.Localpart `sconf:"-"` // All IPs that were explicitly listen on for external SMTP. Only set when there
} `sconf:"optional" sconf-doc:"Destination for per-host TLS reports (TLSRPT). TLS reports can be per recipient domain (for MTA-STS), or per MX host (for DANE). The per-domain TLS reporting configuration is in domains.conf. This is the TLS reporting configuration for this host. If absent, no host-based TLSRPT address is configured, and no host TLSRPT DNS record is suggested."`
InitialMailboxes InitialMailboxes `sconf:"optional" sconf-doc:"Mailboxes to create for new accounts. Inbox is always created. Mailboxes can be given a 'special-use' role, which are understood by most mail clients. If absent/empty, the following additional mailboxes are created: Sent, Archive, Trash, Drafts and Junk."`
DefaultMailboxes []string `sconf:"optional" sconf-doc:"Deprecated in favor of InitialMailboxes. Mailboxes to create when adding an account. Inbox is always created. If no mailboxes are specified, the following are automatically created: Sent, Archive, Trash, Drafts and Junk."`
Transports map[string]Transport `sconf:"optional" sconf-doc:"Transport are mechanisms for delivering messages. Transports can be referenced from Routes in accounts, domains and the global configuration. There is always an implicit/fallback delivery transport doing direct delivery with SMTP from the outgoing message queue. Transports are typically only configured when using smarthosts, i.e. when delivering through another SMTP server. Zero or one transport methods must be set in a transport, never multiple. When using an external party to send email for a domain, keep in mind you may have to add their IP address to your domain's SPF record, and possibly additional DKIM records."`
// Awkward naming of fields to get intended default behaviour for zero values.
NoOutgoingDMARCReports bool `sconf:"optional" sconf-doc:"Do not send DMARC reports (aggregate only). By default, aggregate reports on DMARC evaluations are sent to domains if their DMARC policy requests them. Reports are sent at whole hours, with a minimum of 1 hour and maximum of 24 hours, rounded up so a whole number of intervals cover 24 hours, aligned at whole days in UTC. Reports are sent from the postmaster@<mailhostname> address."`
NoOutgoingTLSReports bool `sconf:"optional" sconf-doc:"Do not send TLS reports. By default, reports about failed SMTP STARTTLS connections and related MTA-STS/DANE policies are sent to domains if their TLSRPT DNS record requests them. Reports covering a 24 hour UTC interval are sent daily. Reports are sent from the postmaster address of the configured domain the mailhostname is in. If there is no such domain, or it does not have DKIM configured, no reports are sent."`
OutgoingTLSReportsForAllSuccess bool `sconf:"optional" sconf-doc:"Also send TLS reports if there were no SMTP STARTTLS connection failures. By default, reports are only sent when at least one failure occurred. If a report is sent, it does always include the successful connection counts as well."`
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for each individual account, only applicable if greater than zero. Can be overridden per account. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage. The quota only applies to the email message files, not to any file system overhead and also not the message index database file (account for approximately 15% overhead)."`
// All IPs that were explicitly listened on for external SMTP. Only set when there
// are no unspecified external SMTP listeners and there is at most one for IPv4 and // are no unspecified external SMTP listeners and there is at most one for IPv4 and
// at most one for IPv6. Used for setting the local address when making outgoing // at most one for IPv6. Used for setting the local address when making outgoing
// connections. Those IPs are assumed to be in an SPF record for the domain, // connections. Those IPs are assumed to be in an SPF record for the domain,
@ -88,79 +69,40 @@ type Static struct {
GID uint32 `sconf:"-" json:"-"` GID uint32 `sconf:"-" json:"-"`
} }
// InitialMailboxes are mailboxes created for a new account.
type InitialMailboxes struct {
SpecialUse SpecialUseMailboxes `sconf:"optional" sconf-doc:"Special-use roles to mailbox to create."`
Regular []string `sconf:"optional" sconf-doc:"Regular, non-special-use mailboxes to create."`
}
// SpecialUseMailboxes holds mailbox names for special-use roles. Mail clients
// recognize these special-use roles, e.g. appending sent messages to whichever
// mailbox has the Sent special-use flag.
type SpecialUseMailboxes struct {
Sent string `sconf:"optional"`
Archive string `sconf:"optional"`
Trash string `sconf:"optional"`
Draft string `sconf:"optional"`
Junk string `sconf:"optional"`
}
// Dynamic is the parsed form of domains.conf, and is automatically reloaded when changed. // Dynamic is the parsed form of domains.conf, and is automatically reloaded when changed.
type Dynamic struct { type Dynamic struct {
Domains map[string]Domain `sconf-doc:"NOTE: This config file is in 'sconf' format. Indent with tabs. Comments must be on their own line, they don't end a line. Do not escape or quote strings. Details: https://pkg.go.dev/github.com/mjl-/sconf.\n\n\nDomains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."` Domains map[string]Domain `sconf-doc:"Domains for which email is accepted. For internationalized domains, use their IDNA names in UTF-8."`
Accounts map[string]Account `sconf-doc:"Accounts represent mox users, each with a password and email address(es) to which email can be delivered (possibly at different domains). Each account has its own on-disk directory holding its messages and index database. An account name is not an email address."` Accounts map[string]Account `sconf-doc:"Accounts to which email can be delivered. An account can accept email for multiple domains, for multiple localparts, and deliver to multiple mailboxes."`
WebDomainRedirects map[string]string `sconf:"optional" sconf-doc:"Redirect all requests from domain (key) to domain (value). Always redirects to HTTPS. For plain HTTP redirects, use a WebHandler with a WebRedirect."` WebDomainRedirects map[string]string `sconf:"optional" sconf-doc:"Redirect all requests from domain (key) to domain (value). Always redirects to HTTPS. For plain HTTP redirects, use a WebHandler with a WebRedirect."`
WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting, reverse-proxying HTTP(s) or passing the request to an internal service. The first matching WebHandler will handle the request. Built-in system handlers, e.g. for ACME validation, autoconfig and mta-sts always run first. Built-in handlers for admin, account, webmail and webapi are evaluated after all handlers, including webhandlers (allowing for overrides of internal services for some domains). If no handler matches, the response status code is file not found (404). If webserver features are missing, forward the requests to an application that provides the needed functionality itself."` WebHandlers []WebHandler `sconf:"optional" sconf-doc:"Handle webserver requests by serving static files, redirecting or reverse-proxying HTTP(s). The first matching WebHandler will handle the request. Built-in handlers, e.g. for account, admin, autoconfig and mta-sts always run first. If no handler matches, the response status code is file not found (404). If functionality you need is missng, simply forward the requests to an application that can provide the needed functionality."`
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, domain routes and finally these global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
MonitorDNSBLs []string `sconf:"optional" sconf-doc:"DNS blocklists to periodically check with if IPs we send from are present, without using them for checking incoming deliveries.. Also see DNSBLs in SMTP listeners in mox.conf, which specifies DNSBLs to use both for incoming deliveries and for checking our IPs against. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net."`
WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-" json:"-"` WebDNSDomainRedirects map[dns.Domain]dns.Domain `sconf:"-"`
MonitorDNSBLZones []dns.Domain `sconf:"-"`
ClientSettingDomains map[dns.Domain]struct{} `sconf:"-" json:"-"`
} }
type ACME struct { type ACME struct {
DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."` DirectoryURL string `sconf-doc:"For letsencrypt, use https://acme-v02.api.letsencrypt.org/directory."`
RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."` RenewBefore time.Duration `sconf:"optional" sconf-doc:"How long before expiration to renew the certificate. Default is 30 days."`
ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."` ContactEmail string `sconf-doc:"Email address to register at ACME provider. The provider can email you when certificates are about to expire. If you configure an address for which email is delivered by this server, keep in mind that TLS misconfigurations could result in such notification emails not arriving."`
Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the tls connection here, e.g. by configuring firewall-level port forwarding. Validation over the https port uses tls-alpn-01 with application-layer protocol negotiation, which essentially means the original tls connection must make it here unmodified, an https reverse proxy will not work."` Port int `sconf:"optional" sconf-doc:"TLS port for ACME validation, 443 by default. You should only override this if you cannot listen on port 443 directly. ACME will make requests to port 443, so you'll have to add an external mechanism to get the connection here, e.g. by configuring port forwarding."`
IssuerDomainName string `sconf:"optional" sconf-doc:"If set, used for suggested CAA DNS records, for restricting TLS certificate issuance to a Certificate Authority. If empty and DirectyURL is for Let's Encrypt, this value is set automatically to letsencrypt.org."`
ExternalAccountBinding *ExternalAccountBinding `sconf:"optional" sconf-doc:"ACME providers can require that a request for a new ACME account reference an existing non-ACME account known to the provider. External account binding references that account by a key id, and authorizes new ACME account requests by signing it with a key known both by the ACME client and ACME provider."`
// ../rfc/8555:2111
Manager *autotls.Manager `sconf:"-" json:"-"` Manager *autotls.Manager `sconf:"-" json:"-"`
} }
type ExternalAccountBinding struct {
KeyID string `sconf-doc:"Key identifier, from ACME provider."`
KeyFile string `sconf-doc:"File containing the base64url-encoded key used to sign account requests with external account binding. The ACME provider will verify the account request is correctly signed by the key. File is evaluated relative to the directory of mox.conf."`
}
type Listener struct { type Listener struct {
IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs. If both outgoing IPv4 and IPv6 connectivity is possible, and only one family has explicitly configured addresses, both address families are still used for outgoing connections. Use the \"direct\" transport to limit address families for outgoing connections."` IPs []string `sconf-doc:"Use 0.0.0.0 to listen on all IPv4 and/or :: to listen on all IPv6 addresses, but it is better to explicitly specify the IPs you want to use for email, as mox will make sure outgoing connections will only be made from one of those IPs."`
NATIPs []string `sconf:"optional" sconf-doc:"If set, the mail server is configured behind a NAT and field IPs are internal instead of the public IPs, while NATIPs lists the public IPs. Used during IP-related DNS self-checks, such as for iprev, mx, spf, autoconfig, autodiscover, and for autotls."` IPsNATed bool `sconf:"optional" sconf-doc:"Set this if the specified IPs are not the public IPs, but are NATed. This makes the DNS check skip a few checks related to IPs, such as for iprev, mx, spf, autoconfig, autodiscover."`
IPsNATed bool `sconf:"optional" sconf-doc:"Deprecated, use NATIPs instead. If set, IPs are not the public IPs, but are NATed. Skips IP-related DNS self-checks."` Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used."`
Hostname string `sconf:"optional" sconf-doc:"If empty, the config global Hostname is used. The internal services webadmin, webaccount, webmail and webapi only match requests to IPs, this hostname, \"localhost\". All except webadmin also match for any client settings domain."`
HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config. HostnameDomain dns.Domain `sconf:"-" json:"-"` // Set when parsing config.
TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."` TLS *TLS `sconf:"optional" sconf-doc:"For SMTP/IMAP STARTTLS, direct TLS and HTTPS connections."`
SMTPMaxMessageSize int64 `sconf:"optional" sconf-doc:"Maximum size in bytes for incoming and outgoing messages. Default is 100MB."` SMTPMaxMessageSize int64 `sconf:"optional" sconf-doc:"Maximum size in bytes accepted incoming and outgoing messages. Default is 100MB."`
SMTP struct { SMTP struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 25."` Port int `sconf:"optional" sconf-doc:"Default 25."`
NoSTARTTLS bool `sconf:"optional" sconf-doc:"Do not offer STARTTLS to secure the connection. Not recommended."` NoSTARTTLS bool `sconf:"optional" sconf-doc:"Do not offer STARTTLS to secure the connection. Not recommended."`
RequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not accept incoming messages if STARTTLS is not active. Consider using in combination with an MTA-STS policy and/or DANE. A remote SMTP server may not support TLS and may not be able to deliver messages. Incoming messages for TLS reporting addresses ignore this setting and do not require TLS."` RequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not accept incoming messages if STARTTLS is not active. Can be used in combination with a strict MTA-STS policy. A remote SMTP server may not support TLS and may not be able to deliver messages."`
NoRequireTLS bool `sconf:"optional" sconf-doc:"Do not announce the REQUIRETLS SMTP extension. Messages delivered using the REQUIRETLS extension should only be distributed onwards to servers also implementing the REQUIRETLS extension. In some situations, such as hosting mailing lists, this may not be feasible due to lack of support for the extension by mailing list subscribers."` DNSBLs []string `sconf:"optional" sconf-doc:"Addresses of DNS block lists for incoming messages. Block lists are only consulted for connections/messages without enough reputation to make an accept/reject decision. This prevents sending IPs of all communications to the block list provider. If any of the listed DNSBLs contains a requested IP address, the message is rejected as spam. The DNSBLs are checked for healthiness before use, at most once per 4 hours. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net"`
// Reoriginated messages (such as messages sent to mailing list subscribers) should DNSBLZones []dns.Domain `sconf:"-"`
// keep REQUIRETLS. ../rfc/8689:412
DNSBLs []string `sconf:"optional" sconf-doc:"Addresses of DNS block lists for incoming messages. Block lists are only consulted for connections/messages without enough reputation to make an accept/reject decision. This prevents sending IPs of all communications to the block list provider. If any of the listed DNSBLs contains a requested IP address, the message is rejected as spam. The DNSBLs are checked for healthiness before use, at most once per 4 hours. IPs we can send from are periodically checked for being in the configured DNSBLs. See MonitorDNSBLs in domains.conf to only monitor IPs we send from, without using those DNSBLs for incoming messages. Example DNSBLs: sbl.spamhaus.org, bl.spamcop.net. See https://www.spamhaus.org/sbl/ and https://www.spamcop.net/ for more information and terms of use."`
FirstTimeSenderDelay *time.Duration `sconf:"optional" sconf-doc:"Delay before accepting a message from a first-time sender for the destination account. Default: 15s."`
TLSSessionTicketsDisabled *bool `sconf:"optional" sconf-doc:"Override default setting for enabling TLS session tickets. Disabling session tickets may work around TLS interoperability issues."`
DNSBLZones []dns.Domain `sconf:"-"`
} `sconf:"optional"` } `sconf:"optional"`
Submission struct { Submission struct {
Enabled bool Enabled bool
@ -168,9 +110,8 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."` NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Do not require STARTTLS. Since users must login, this means password may be sent without encryption. Not recommended."`
} `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."` } `sconf:"optional" sconf-doc:"SMTP for submitting email, e.g. by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using Submissions which is always a TLS connection."`
Submissions struct { Submissions struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 465."` Port int `sconf:"optional" sconf-doc:"Default 465."`
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable submission on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'smtp' protocol after TLS, it will be able to talk SMTP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
} `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."` } `sconf:"optional" sconf-doc:"SMTP over TLS for submitting email, by email applications. Requires a TLS config."`
IMAP struct { IMAP struct {
Enabled bool Enabled bool
@ -178,19 +119,30 @@ type Listener struct {
NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."` NoRequireSTARTTLS bool `sconf:"optional" sconf-doc:"Enable this only when the connection is otherwise encrypted (e.g. through a VPN)."`
} `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."` } `sconf:"optional" sconf-doc:"IMAP for reading email, by email applications. Starts out in plain text, can be upgraded to TLS with the STARTTLS command. Prefer using IMAPS instead which is always a TLS connection."`
IMAPS struct { IMAPS struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 993."` Port int `sconf:"optional" sconf-doc:"Default 993."`
EnabledOnHTTPS bool `sconf:"optional" sconf-doc:"Additionally enable IMAP on HTTPS port 443 via TLS ALPN. TLS Application Layer Protocol Negotiation allows clients to request a specific protocol from the server as part of the TLS connection setup. When this setting is enabled and a client requests the 'imap' protocol after TLS, it will be able to talk IMAP to Mox on port 443. This is meant to be useful as a censorship circumvention technique for Delta Chat."`
} `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. Requires a TLS config."` } `sconf:"optional" sconf-doc:"IMAP over TLS for reading email, by email applications. Requires a TLS config."`
AccountHTTP WebService `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Default path is /."` AccountHTTP struct {
AccountHTTPS WebService `sconf:"optional" sconf-doc:"Account web interface listener like AccountHTTP, but for HTTPS. Requires a TLS config."` Enabled bool
AdminHTTP WebService `sconf:"optional" sconf-doc:"Admin web interface, for managing domains, accounts, etc. Default path is /admin/. Preferably only enable on non-public IPs. Hint: use 'ssh -L 8080:localhost:80 you@yourmachine' and open http://localhost:8080/admin/, or set up a tunnel (e.g. WireGuard) and add its IP to the mox 'internal' listener."` Port int `sconf:"optional" sconf-doc:"Default 80."`
AdminHTTPS WebService `sconf:"optional" sconf-doc:"Admin web interface listener like AdminHTTP, but for HTTPS. Requires a TLS config."` Path string `sconf:"optional" sconf-doc:"Path to serve account requests on, e.g. /mox/. Useful if domain serves other resources. Default is /."`
WebmailHTTP WebService `sconf:"optional" sconf-doc:"Webmail client, for reading email. Default path is /webmail/."` } `sconf:"optional" sconf-doc:"Account web interface, for email users wanting to change their accounts, e.g. set new password, set new delivery rulesets. Served at /."`
WebmailHTTPS WebService `sconf:"optional" sconf-doc:"Webmail client, like WebmailHTTP, but for HTTPS. Requires a TLS config."` AccountHTTPS struct {
WebAPIHTTP WebService `sconf:"optional" sconf-doc:"Like WebAPIHTTP, but with plain HTTP, without TLS."` Enabled bool
WebAPIHTTPS WebService `sconf:"optional" sconf-doc:"WebAPI, a simple HTTP/JSON-based API for email, with HTTPS (requires a TLS config). Default path is /webapi/."` Port int `sconf:"optional" sconf-doc:"Default 80."`
MetricsHTTP struct { Path string `sconf:"optional" sconf-doc:"Path to serve account requests on, e.g. /mox/. Useful if domain serves other resources. Default is /."`
} `sconf:"optional" sconf-doc:"Account web interface listener for HTTPS. Requires a TLS config."`
AdminHTTP struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 80."`
Path string `sconf:"optional" sconf-doc:"Path to serve admin requests on, e.g. /moxadmin/. Useful if domain serves other resources. Default is /admin/."`
} `sconf:"optional" sconf-doc:"Admin web interface, for managing domains, accounts, etc. Served at /admin/. Preferably only enable on non-public IPs. Hint: use 'ssh -L 8080:localhost:80 you@yourmachine' and open http://localhost:8080/admin/, or set up a tunnel (e.g. WireGuard) and add its IP to the mox 'internal' listener."`
AdminHTTPS struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 443."`
Path string `sconf:"optional" sconf-doc:"Path to serve admin requests on, e.g. /moxadmin/. Useful if domain serves other resources. Default is /admin/."`
} `sconf:"optional" sconf-doc:"Admin web interface listener for HTTPS. Requires a TLS config. Preferably only enable on non-public IPs."`
MetricsHTTP struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 8010."` Port int `sconf:"optional" sconf-doc:"Default 8010."`
} `sconf:"optional" sconf-doc:"Serve prometheus metrics, for monitoring. You should not enable this on a public IP."` } `sconf:"optional" sconf-doc:"Serve prometheus metrics, for monitoring. You should not enable this on a public IP."`
@ -209,177 +161,64 @@ type Listener struct {
NonTLS bool `sconf:"optional" sconf-doc:"If set, plain HTTP instead of HTTPS is spoken on the configured port. Can be useful when the mta-sts domain is reverse proxied."` NonTLS bool `sconf:"optional" sconf-doc:"If set, plain HTTP instead of HTTPS is spoken on the configured port. Can be useful when the mta-sts domain is reverse proxied."`
} `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements. Requires a TLS config."` } `sconf:"optional" sconf-doc:"Serve MTA-STS policies describing SMTP TLS requirements. Requires a TLS config."`
WebserverHTTP struct { WebserverHTTP struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."` Port int `sconf:"optional" sconf-doc:"Port for plain HTTP (non-TLS) webserver."`
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener."` } `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener."`
WebserverHTTPS struct { WebserverHTTPS struct {
Enabled bool Enabled bool
Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."` Port int `sconf:"optional" sconf-doc:"Port for HTTPS webserver."`
RateLimitDisabled bool `sconf:"optional" sconf-doc:"Disable rate limiting for all requests to this port."`
} `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener. Either ACME must be configured, or for each WebHandler domain a TLS certificate must be configured."` } `sconf:"optional" sconf-doc:"All configured WebHandlers will serve on an enabled listener. Either ACME must be configured, or for each WebHandler domain a TLS certificate must be configured."`
} }
// WebService is an internal web interface: webmail, webaccount, webadmin, webapi.
type WebService struct {
Enabled bool
Port int `sconf:"optional" sconf-doc:"Default 80 for HTTP and 443 for HTTPS. See Hostname at Listener for hostname matching behaviour."`
Path string `sconf:"optional" sconf-doc:"Path to serve requests on. Should end with a slash, related to cookie paths."`
Forwarded bool `sconf:"optional" sconf-doc:"If set, X-Forwarded-* headers are used for the remote IP address for rate limiting and for the \"secure\" status of cookies."`
}
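
Illustrative sketch (not part of the diff above): a WebService value as it might be assigned to a Listener field such as WebmailHTTPS. The import path github.com/mjl-/mox/config and all concrete values are assumptions for illustration only.

package main

import "github.com/mjl-/mox/config" // assumed import path

// Serve the webmail client on port 443 under /webmail/, trusting
// X-Forwarded-* headers from a reverse proxy in front of mox.
var webmail = config.WebService{
	Enabled:   true,
	Port:      443,
	Path:      "/webmail/",
	Forwarded: true,
}

func main() {}
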
// Transport is a method to deliver a message. At most one of the fields can // be non-nil. The non-nil field represents the type of transport. For a
// be non-nil. The non-nil field represents the type of transport. For a
// transport with all fields nil, regular email delivery is done.
type Transport struct {
Submissions *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a TLS connection to submit email to a remote queue."`
Submission *TransportSMTP `sconf:"optional" sconf-doc:"Submission SMTP over a plain TCP connection (possibly with STARTTLS) to submit email to a remote queue."`
SMTP *TransportSMTP `sconf:"optional" sconf-doc:"SMTP over a plain connection (possibly with STARTTLS), typically for old-fashioned unauthenticated relaying to a remote queue."`
Socks *TransportSocks `sconf:"optional" sconf-doc:"Like regular direct delivery, but makes outgoing connections through a SOCKS proxy."`
Direct *TransportDirect `sconf:"optional" sconf-doc:"Like regular direct delivery, but allows tweaking outgoing connections."`
Fail *TransportFail `sconf:"optional" sconf-doc:"Immediately fails the delivery attempt."`
}
// TransportSMTP delivers messages by "submission" (SMTP, typically
// authenticated) to the queue of a remote host (smarthost), or by relaying
// (SMTP, typically unauthenticated).
type TransportSMTP struct {
Host string `sconf-doc:"Host name to connect to and for verifying its TLS certificate."`
Port int `sconf:"optional" sconf-doc:"If unset or 0, the default port for submission(s)/smtp is used: 25 for SMTP, 465 for submissions (with TLS), 587 for submission (possibly with STARTTLS)."`
STARTTLSInsecureSkipVerify bool `sconf:"optional" sconf-doc:"If set, an unverifiable remote TLS certificate during STARTTLS is accepted."`
NoSTARTTLS bool `sconf:"optional" sconf-doc:"If set for submission or smtp transport, do not attempt STARTTLS on the connection. Authentication credentials and messages will be transferred in clear text."`
Auth *SMTPAuth `sconf:"optional" sconf-doc:"If set, authentication credentials for the remote server."`
DNSHost dns.Domain `sconf:"-" json:"-"`
}
// SMTPAuth hold authentication credentials used when delivering messages
// through a smarthost.
type SMTPAuth struct {
Username string
Password string
Mechanisms []string `sconf:"optional" sconf-doc:"Allowed authentication mechanisms. Defaults to SCRAM-SHA-256-PLUS, SCRAM-SHA-256, SCRAM-SHA-1-PLUS, SCRAM-SHA-1, CRAM-MD5. Not included by default: PLAIN. Specify the strongest mechanism known to be implemented by the server to prevent mechanism downgrade attacks."`
EffectiveMechanisms []string `sconf:"-" json:"-"`
}
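
Illustrative sketch (not part of the diff above): a smarthost transport combining the Transport, TransportSMTP and SMTPAuth types defined above. The host name, credentials and the import path github.com/mjl-/mox/config are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

// Submit outgoing mail to a relay over TLS (submissions, port 465),
// authenticating with SCRAM so the password is not sent in plain text.
var smarthost = config.Transport{
	Submissions: &config.TransportSMTP{
		Host: "smtp.relay.example",
		Port: 465,
		Auth: &config.SMTPAuth{
			Username:   "mox@relay.example",
			Password:   "example-password",
			Mechanisms: []string{"SCRAM-SHA-256-PLUS", "SCRAM-SHA-256"},
		},
	},
}

func main() {}
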
type TransportSocks struct {
Address string `sconf-doc:"Address of SOCKS proxy, of the form host:port or ip:port."`
RemoteIPs []string `sconf-doc:"IP addresses connections from the SOCKS server will originate from. These IP addresses should be configured in the SPF record (keep in mind DNS record time to live (TTL) when adding a SOCKS proxy). Reverse DNS should be set up for these addresses, resolving to RemoteHostname. These are typically the IPv4 and IPv6 addresses for the host in the Address field."`
RemoteHostname string `sconf-doc:"Hostname belonging to RemoteIPs. This name is used in the SMTP EHLO command. This is typically the hostname of the host in the Address field."`
// todo: add authentication credentials?
IPs []net.IP `sconf:"-" json:"-"` // Parsed form of RemoteIPs.
Hostname dns.Domain `sconf:"-" json:"-"` // Parsed form of RemoteHostname
}
type TransportDirect struct {
DisableIPv4 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv4 addresses to connect to remote SMTP servers."`
DisableIPv6 bool `sconf:"optional" sconf-doc:"If set, outgoing SMTP connections will *NOT* use IPv6 addresses to connect to remote SMTP servers."`
IPFamily string `sconf:"-" json:"-"`
}
// TransportFail is a transport that fails all delivery attempts.
type TransportFail struct {
SMTPCode int `sconf:"optional" sconf-doc:"SMTP error code and optional enhanced error code to use for the failure. If empty, 554 is used (transaction failed)."`
SMTPMessage string `sconf:"optional" sconf-doc:"Message to include for the rejection. It will be shown in the DSN."`
// Effective values to use, set when parsing.
Code int `sconf:"-"`
Message string `sconf:"-"`
}
type Domain struct { type Domain struct {
Disabled bool `sconf:"optional" sconf-doc:"Disabled domains can be useful during/before migrations. Domains that are disabled can still be configured like normal, including adding addresses using the domain to accounts. However, disabled domains: 1. Do not try to fetch ACME certificates. TLS connections to host names involving the email domain will fail. A TLS certificate for the hostname (that will be used as MX) itself will be requested. 2. Incoming deliveries over SMTP are rejected with a temporary error '450 4.2.1 recipient domain temporarily disabled'. 3. Submissions over SMTP using an (envelope) SMTP MAIL FROM address or message 'From' address of a disabled domain will be rejected with a temporary error '451 4.3.0 sender domain temporarily disabled'. Note that accounts with addresses at disabled domains can still log in and read email (unless the account itself is disabled)."` Description string `sconf:"optional" sconf-doc:"Free-form description of domain."`
Description string `sconf:"optional" sconf-doc:"Free-form description of domain."` LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."`
ClientSettingsDomain string `sconf:"optional" sconf-doc:"Hostname for client settings instead of the mail server hostname. E.g. mail.<domain>. For future migration to another mail operator without requiring all clients to update their settings, it is convenient to have client settings that reference a subdomain of the hosted domain instead of the hostname of the server where the mail is currently hosted. If empty, the hostname of the mail server is used for client configurations. Unicode name."` LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."`
LocalpartCatchallSeparator string `sconf:"optional" sconf-doc:"If not empty, only the string before the separator is used for email delivery decisions. For example, if set to \"+\", you+anything@example.com will be delivered to you@example.com."` DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."`
LocalpartCatchallSeparators []string `sconf:"optional" sconf-doc:"Similar to LocalpartCatchallSeparator, but in case multiple are needed. For example both \"+\" and \"-\". Only one of LocalpartCatchallSeparator or LocalpartCatchallSeparators can be set. If set, the first separator is used to make unique addresses for outgoing SMTP connections with FromIDLoginAddresses."` DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
LocalpartCaseSensitive bool `sconf:"optional" sconf-doc:"If set, upper/lower case is relevant for email delivery."` MTASTS *MTASTS `sconf:"optional" sconf-doc:"With MTA-STS a domain publishes, in DNS, presence of a policy for using/requiring TLS for SMTP connections. The policy is served over HTTPS."`
DKIM DKIM `sconf:"optional" sconf-doc:"With DKIM signing, a domain is taking responsibility for (content of) emails it sends, letting receiving mail servers build up a (hopefully positive) reputation of the domain, which can help with mail delivery."` TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
DMARC *DMARC `sconf:"optional" sconf-doc:"With DMARC, a domain publishes, in DNS, a policy on how other mail servers should handle incoming messages with the From-header matching this domain and/or subdomain (depending on the configured alignment). Receiving mail servers use this to build up a reputation of this domain, which can help with mail delivery. A domain can also publish an email address to which reports about DMARC verification results can be sent by verifying mail servers, useful for monitoring. Incoming DMARC reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
MTASTS *MTASTS `sconf:"optional" sconf-doc:"MTA-STS is a mechanism that allows publishing a policy with requirements for WebPKI-verified SMTP STARTTLS connections for email delivered to a domain. Existence of a policy is announced in a DNS TXT record (often unprotected/unverified, MTA-STS's weak spot). If a policy exists, it is fetched with a WebPKI-verified HTTPS request. The policy can indicate that WebPKI-verified SMTP STARTTLS is required, and which MX hosts (optionally with a wildcard pattern) are allowed. MX hosts to deliver to are still taken from DNS (again, not necessarily protected/verified), but messages will only be delivered to domains matching the MX hosts from the published policy. Mail servers look up the MTA-STS policy when first delivering to a domain, then keep a cached copy, periodically checking the DNS record if a new policy is available, and fetching and caching it if so. To update a policy, first serve a new policy with an updated policy ID, then update the DNS record (not the other way around). To remove an enforced policy, publish an updated policy with mode \"none\" for a long enough period so all cached policies have been refreshed (taking DNS TTL and policy max age into account), then remove the policy from DNS, wait for TTL to expire, and stop serving the policy."` TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
TLSRPT *TLSRPT `sconf:"optional" sconf-doc:"With TLSRPT a domain specifies in DNS where reports about encountered SMTP TLS behaviour should be sent. Useful for monitoring. Incoming TLS reports are automatically parsed, validated, added to metrics and stored in the reporting database for later display in the admin web pages."`
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates account routes, these domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
Aliases map[string]Alias `sconf:"optional" sconf-doc:"Aliases that cause messages to be delivered to one or more locally configured addresses. Keys are localparts (encoded, as they appear in email addresses)."`
Domain dns.Domain `sconf:"-"` Domain dns.Domain `sconf:"-" json:"-"`
ClientSettingsDNSDomain dns.Domain `sconf:"-" json:"-"`
// Set when DMARC and TLSRPT (when set) have an address with a different domain (we're
// hosting the reporting), and there are no destination addresses configured for
// the domain. Disables some functionality related to hosting a domain.
ReportsOnly bool `sconf:"-" json:"-"`
LocalpartCatchallSeparatorsEffective []string `sconf:"-"` // Either LocalpartCatchallSeparators, the value of LocalpartCatchallSeparator, or empty.
}
// todo: allow external addresses as members of aliases. we would add messages for them to the queue for outgoing delivery. we should require an admin address to which delivery failures will be delivered (locally, and to use in smtp mail from, so dsns go there). also take care to evaluate smtputf8 (if external address requires utf8 and incoming transaction didn't).
// todo: as alternative to PostPublic, allow specifying a list of addresses (dmarc-like verified) that are (the only addresses) allowed to post to the list. if msgfrom is an external address, require a valid dkim signature to prevent dmarc-policy-related issues when delivering to remote members.
// todo: add option to require messages sent to an alias have that alias as From or Reply-To address?
type Alias struct {
Addresses []string `sconf-doc:"Expanded addresses to deliver to. These must currently be addresses of local accounts. To prevent duplicate messages, a member address that is also an explicit recipient in the SMTP transaction will only have the message delivered once. If the address in the message From header is a member, that member also won't receive the message."`
PostPublic bool `sconf:"optional" sconf-doc:"If true, anyone can send messages to the list. Otherwise only members, based on message From address, which is assumed to be DMARC-like-verified."`
ListMembers bool `sconf:"optional" sconf-doc:"If true, members can see addresses of members."`
AllowMsgFrom bool `sconf:"optional" sconf-doc:"If true, members are allowed to send messages with this alias address in the message From header."`
LocalpartStr string `sconf:"-"` // In encoded form.
Domain dns.Domain `sconf:"-"`
ParsedAddresses []AliasAddress `sconf:"-"` // Matches addresses.
}
type AliasAddress struct {
Address smtp.Address // Parsed address.
AccountName string // Looked up.
Destination Destination // Belonging to address.
} }
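
Illustrative sketch (not part of the diff above): an alias delivering to two local accounts, keyed by localpart as the Domain.Aliases documentation describes. Addresses and the import path github.com/mjl-/mox/config are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

// A "support" alias that anyone may post to; members are not listed and may
// not send with the alias address in the From header (the defaults).
var aliases = map[string]config.Alias{
	"support": {
		Addresses:  []string{"alice@example.org", "bob@example.org"},
		PostPublic: true,
	},
}

func main() {}
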
type DMARC struct { type DMARC struct {
Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarcreports."` Localpart string `sconf-doc:"Address-part before the @ that accepts DMARC reports. Must be non-internationalized. Recommended value: dmarc-reports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the DMARC DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
Account string `sconf-doc:"Account to deliver to."` Account string `sconf-doc:"Account to deliver to."`
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. DMARC."` Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. DMARC."`
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility. ParsedLocalpart smtp.Localpart `sconf:"-"`
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
} }
type MTASTS struct { type MTASTS struct {
PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it here to update the served policy, then update the DNS record with the updated policy ID."` PolicyID string `sconf-doc:"Policies are versioned. The version must be specified in the DNS record. If you change a policy, first change it in mox, then update the DNS record."`
Mode mtasts.Mode `sconf-doc:"If set to \"enforce\", a remote SMTP server will not deliver email to us if it cannot make a WebPKI-verified SMTP STARTTLS connection. In mode \"testing\", deliveries can be done without verified TLS, but errors will be reported through TLS reporting. In mode \"none\", verified TLS is not required, used for phasing out an MTA-STS policy."` Mode mtasts.Mode `sconf-doc:"testing, enforce or none. If set to enforce, a remote SMTP server will not deliver email to us if it cannot make a TLS connection."`
MaxAge time.Duration `sconf-doc:"How long a remote mail server is allowed to cache a policy. Typically 1 or several weeks."` MaxAge time.Duration `sconf-doc:"How long a remote mail server is allowed to cache a policy. Typically 1 or several weeks."`
MX []string `sconf:"optional" sconf-doc:"List of server names allowed for SMTP. If empty, the configured hostname is set. Host names can contain a wildcard (*) as a leading label (matching a single label, e.g. *.example matches host.example, not sub.host.example)."` MX []string `sconf:"optional" sconf-doc:"List of server names allowed for SMTP. If empty, the configured hostname is set. Host names can contain a wildcard (*) as a leading label (matching a single label, e.g. *.example matches host.example, not sub.host.example)."`
// todo: parse mx as valid mtasts.Policy.MX, with dns.ParseDomain but taking wildcard into account // todo: parse mx as valid mtasts.Policy.MX, with dns.ParseDomain but taking wildcard into account
} }
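
Illustrative sketch (not part of the diff above): an MTA-STS policy in enforce mode. It assumes mtasts.Mode is a string-like type in a package at github.com/mjl-/mox/mtasts; the policy ID, max age and MX names are made up. Remember to bump PolicyID first, then update the DNS TXT record.

package main

import (
	"time"

	"github.com/mjl-/mox/config" // assumed import paths
	"github.com/mjl-/mox/mtasts"
)

// Require WebPKI-verified STARTTLS for a week-long cache period, allowing the
// primary MX host plus any single-label host under backup.example.org.
var mtastsPolicy = config.MTASTS{
	PolicyID: "20240101T000000",
	Mode:     mtasts.Mode("enforce"),
	MaxAge:   7 * 24 * time.Hour,
	MX:       []string{"mail.example.org", "*.backup.example.org"},
}

func main() {}
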
type TLSRPT struct { type TLSRPT struct {
Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tlsreports."` Localpart string `sconf-doc:"Address-part before the @ that accepts TLSRPT reports. Recommended value: tls-reports."`
Domain string `sconf:"optional" sconf-doc:"Alternative domain for reporting address, for incoming reports. Typically empty, causing the domain wherein this config exists to be used. Can be used to receive reports for domains that aren't fully hosted on this server. Configure such a domain as a hosted domain without making all the DNS changes, and configure this field with a domain that is fully hosted on this server, so the localpart and the domain of this field form a reporting address. Then only update the TLSRPT DNS record for the not fully hosted domain, ensuring the reporting address is specified in its \"rua\" field as shown in the suggested DNS settings. Unicode name."`
Account string `sconf-doc:"Account to deliver to."` Account string `sconf-doc:"Account to deliver to."`
Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."` Mailbox string `sconf-doc:"Mailbox to deliver to, e.g. TLSRPT."`
ParsedLocalpart smtp.Localpart `sconf:"-"` // Lower-case if case-sensitivity is not configured for domain. Not "canonical" for catchall separators for backwards compatibility. ParsedLocalpart smtp.Localpart `sconf:"-"`
DNSDomain dns.Domain `sconf:"-"` // Effective domain, always set based on Domain field or Domain where this is configured.
}
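
Illustrative sketch (not part of the diff above): reporting addresses for incoming DMARC and TLS reports, delivered into dedicated mailboxes of a hypothetical "reports" account. The import path github.com/mjl-/mox/config is assumed.

package main

import "github.com/mjl-/mox/config" // assumed import path

// dmarcreports@<domain> and tlsreports@<domain> accept reports from remote
// mail servers; mox parses and stores them for the admin web pages.
var (
	dmarcReports = config.DMARC{
		Localpart: "dmarcreports",
		Account:   "reports",
		Mailbox:   "DMARC",
	}
	tlsReports = config.TLSRPT{
		Localpart: "tlsreports",
		Account:   "reports",
		Mailbox:   "TLSRPT",
	}
)

func main() {}
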
type Canonicalization struct {
HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
} }
type Selector struct { type Selector struct {
Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1."` Hash string `sconf:"optional" sconf-doc:"sha256 (default) or (older, not recommended) sha1"`
HashEffective string `sconf:"-"` HashEffective string `sconf:"-"`
Canonicalization Canonicalization `sconf:"optional"` Canonicalization struct {
Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."` HeaderRelaxed bool `sconf-doc:"If set, some modifications to the headers (mostly whitespace) are allowed."`
HeadersEffective []string `sconf:"-"` // Used when signing. Based on Headers from config, or the reasonable default. BodyRelaxed bool `sconf-doc:"If set, some whitespace modifications to the message body are allowed."`
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."` } `sconf:"optional"`
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."` Headers []string `sconf:"optional" sconf-doc:"Headers to sign with DKIM. If empty, a reasonable default set of headers is selected."`
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."` HeadersEffective []string `sconf:"-"`
DontSealHeaders bool `sconf:"optional" sconf-doc:"If set, don't prevent duplicate headers from being added. Not recommended."`
Expiration string `sconf:"optional" sconf-doc:"Period a signature is valid after signing, as duration, e.g. 72h. The period should be enough for delivery at the final destination, potentially with several hops/relays. In the order of days at least."`
PrivateKeyFile string `sconf-doc:"Either an RSA or ed25519 private key file in PKCS8 PEM form."`
Algorithm string `sconf:"-"` // "ed25519", "rsa-*", based on private key.
ExpirationSeconds int `sconf:"-" json:"-"` // Parsed from Expiration. ExpirationSeconds int `sconf:"-" json:"-"` // Parsed from Expiration.
Key crypto.Signer `sconf:"-" json:"-"` // As parsed with x509.ParsePKCS8PrivateKey. Key crypto.Signer `sconf:"-" json:"-"` // As parsed with x509.ParsePKCS8PrivateKey.
Domain dns.Domain `sconf:"-" json:"-"` // Of selector only, not FQDN. Domain dns.Domain `sconf:"-" json:"-"` // Of selector only, not FQDN.
@ -390,81 +229,28 @@ type DKIM struct {
Sign []string `sconf:"optional" sconf-doc:"List of selectors that emails will be signed with."` Sign []string `sconf:"optional" sconf-doc:"List of selectors that emails will be signed with."`
} }
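
Illustrative sketch (not part of the diff above): a DKIM selector with relaxed/relaxed canonicalization and a 3-day signature validity, plus the Sign list naming it. The selector name and key file are hypothetical; registering the selector under that name in the domain's DKIM selectors map is assumed, as that field falls outside this hunk. The import path github.com/mjl-/mox/config is assumed.

package main

import "github.com/mjl-/mox/config" // assumed import path

var (
	// Selector "key2024": sign with sha256 over relaxed canonicalized
	// headers/body, signatures valid for 72 hours after signing.
	key2024 = config.Selector{
		Hash: "sha256",
		Canonicalization: config.Canonicalization{
			HeaderRelaxed: true,
			BodyRelaxed:   true,
		},
		Expiration:     "72h",
		PrivateKeyFile: "dkim/key2024.example.org.privatekey.pkcs8.pem",
	}
	// Outgoing messages are signed with the selectors listed in Sign.
	dkim = config.DKIM{
		Sign: []string{"key2024"},
	}
)

func main() {}
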
type Route struct {
FromDomain []string `sconf:"optional" sconf-doc:"Matches if the envelope from domain matches one of the configured domains, or if the list is empty. If a domain starts with a dot, prefixes of the domain also match."`
ToDomain []string `sconf:"optional" sconf-doc:"Like FromDomain, but matching against the envelope to domain."`
MinimumAttempts int `sconf:"optional" sconf-doc:"Matches if at least this many deliveries have already been attempted. This can be used to attempt sending through a smarthost when direct delivery has failed several times."`
Transport string `sconf-doc:"The transport used for delivering messages that match the requirements of the above fields."`
// todo future: add ToMX, where we look up the MX record of the destination domain and check (the first, any, all?) mx host against the values in ToMX.
FromDomainASCII []string `sconf:"-"`
ToDomainASCII []string `sconf:"-"`
ResolvedTransport Transport `sconf:"-" json:"-"`
}
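
Illustrative sketch (not part of the diff above): a route that falls back to a named smarthost transport after two failed direct delivery attempts to a specific domain. The transport name must match a transport defined elsewhere in the configuration; all names and the import path github.com/mjl-/mox/config are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

// After two direct attempts to example.com have failed, retry through the
// transport named "smarthost".
var fallbackRoute = config.Route{
	ToDomain:        []string{"example.com"},
	MinimumAttempts: 2,
	Transport:       "smarthost",
}

func main() {}
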
// todo: move RejectsMailbox to store.Mailbox.SpecialUse, possibly with "X" prefix?
// note: outgoing hook events are in ../queue/hooks.go, ../mox-/config.go, ../queue.go and ../webapi/gendoc.sh. keep in sync.
type OutgoingWebhook struct {
URL string `sconf-doc:"URL to POST webhooks."`
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
Events []string `sconf:"optional" sconf-doc:"Events to send outgoing delivery notifications for. If absent, all events are sent. Valid values: delivered, suppressed, delayed, failed, relayed, expanded, canceled, unrecognized."`
}
type IncomingWebhook struct {
URL string `sconf-doc:"URL to POST webhooks to for incoming deliveries over SMTP."`
Authorization string `sconf:"optional" sconf-doc:"If not empty, value of Authorization header to add to HTTP requests."`
}
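
Illustrative sketch (not part of the diff above): webhooks for delivery events, with an Authorization header so the receiving endpoint can verify the caller. URLs, the token and the import path github.com/mjl-/mox/config are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

var (
	// Only send notifications for the listed outgoing-delivery events.
	outgoingHook = config.OutgoingWebhook{
		URL:           "https://app.example.org/mox/outgoing",
		Authorization: "Bearer example-token",
		Events:        []string{"delivered", "failed", "suppressed"},
	}
	// POST incoming SMTP deliveries to the application endpoint.
	incomingHook = config.IncomingWebhook{
		URL:           "https://app.example.org/mox/incoming",
		Authorization: "Bearer example-token",
	}
)

func main() {}
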
type SubjectPass struct {
Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
}
type AutomaticJunkFlags struct {
Enabled bool `sconf-doc:"If enabled, junk/nonjunk flags will be set automatically if they match some of the regular expressions. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order 'junk', 'neutral', 'not junk', and the search stops on the first match. Mailboxes are lowercased before matching."`
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
}
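
Illustrative sketch (not part of the diff above), reusing the example regular expressions from the field documentation: messages moved into junk/spam mailboxes get $Junk, mailboxes on the neutral list get neither flag, and everything else gets $NotJunk. The import path github.com/mjl-/mox/config is assumed.

package main

import "github.com/mjl-/mox/config" // assumed import path

// Mailbox names are lowercased before matching; matching stops at the first
// of junk, neutral, not-junk that applies.
var junkFlags = config.AutomaticJunkFlags{
	Enabled:              true,
	JunkMailboxRegexp:    "^(junk|spam)",
	NeutralMailboxRegexp: "^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects)",
	NotJunkMailboxRegexp: ".*",
}

func main() {}
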
type Account struct { type Account struct {
OutgoingWebhook *OutgoingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about outgoing deliveries."` Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."`
IncomingWebhook *IncomingWebhook `sconf:"optional" sconf-doc:"Webhooks for events about incoming deliveries over SMTP."` Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."`
FromIDLoginAddresses []string `sconf:"optional" sconf-doc:"Login addresses that cause outgoing email to be sent with SMTP MAIL FROM addresses with a unique id after the localpart catchall separator (which must be enabled when addresses are specified here). Any delivery status notifications (DSN, e.g. for bounces), can be related to the original message and recipient with unique id's. You can login to an account with any valid email address, including variants with the localpart catchall separator. You can use this mechanism to both send outgoing messages with and without unique fromid for a given email address. With the webapi and webmail, a unique id will be generated. For submission, the id from the SMTP MAIL FROM command is used if present, and a unique id is generated otherwise."` Destinations map[string]Destination `sconf-doc:"Destinations, keys are email addresses (with IDNA domains). If the address is of the form '@domain', i.e. with localpart missing, it serves as a catchall for the domain, matching all messages that are not explicitly configured. Deprecated behaviour: If the address is not a full address but a localpart, it is combined with Domain to form a full address."`
KeepRetiredMessagePeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep messages retired from the queue (delivered or failed) around. Keeping retired messages is useful for maintaining the suppression list for transactional email, for matching incoming DSNs to sent messages, and for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."` SubjectPass struct {
KeepRetiredWebhookPeriod time.Duration `sconf:"optional" sconf-doc:"Period to keep webhooks retired from the queue (delivered or failed) around. Useful for debugging. The time at which to clean up (remove) is calculated at retire time. E.g. 168h (1 week)."` Period time.Duration `sconf-doc:"How long unique values are accepted after generating, e.g. 12h."` // todo: have a reasonable default for this?
} `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
AutomaticJunkFlags struct {
Enabled bool `sconf-doc:"If enabled, flags will be set automatically if they match a regular expression below. When two of the three mailbox regular expressions are set, the remaining one will match all unmatched messages. Messages are matched in the order specified and the search stops on the first match. Mailboxes are lowercased before matching."`
JunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(junk|spam)."`
NeutralMailboxRegexp string `sconf:"optional" sconf-doc:"Example: ^(inbox|neutral|postmaster|dmarc|tlsrpt|rejects), and you may wish to add trash depending on how you use it, or leave this empty."`
NotJunkMailboxRegexp string `sconf:"optional" sconf-doc:"Example: .* or an empty string."`
} `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on mailbox messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
LoginDisabled string `sconf:"optional" sconf-doc:"If non-empty, login attempts on all protocols (e.g. SMTP/IMAP, web interfaces) are rejected with this error message. Useful during migrations. Incoming deliveries for addresses of this account are still accepted as normal."` DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
Domain string `sconf-doc:"Default domain for account. Deprecated behaviour: If a destination is not a full address but only a localpart, this domain is added to form a full address."` JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
Description string `sconf:"optional" sconf-doc:"Free form description, e.g. full name or alternative contact info."` NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
FullName string `sconf:"optional" sconf-doc:"Full name, to use in message From header when composing messages in webmail. Can be overridden per destination."` NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
Destinations map[string]Destination `sconf:"optional" sconf-doc:"Destinations, keys are email addresses (with IDNA domains). All destinations are allowed for logging in with IMAP/SMTP/webmail. If no destinations are configured, the account cannot log in. If the address is of the form '@domain', i.e. with localpart missing, it serves as a catchall for the domain, matching all messages that are not explicitly configured. Deprecated behaviour: If the address is not a full address but a localpart, it is combined with Domain to form a full address."`
SubjectPass SubjectPass `sconf:"optional" sconf-doc:"If configured, messages classified as weakly spam are rejected with instructions to retry delivery, but this time with a signed token added to the subject. During the next delivery attempt, the signed token will bypass the spam filter. Messages with a clear spam signal, such as a known bad reputation, are rejected/delayed without a signed token."`
QuotaMessageSize int64 `sconf:"optional" sconf-doc:"Default maximum total message size in bytes for the account, overriding any globally configured default maximum size if non-zero. A negative value can be used to have no limit in case there is a limit by default. Attempting to add new messages to an account beyond its maximum total size will result in an error. Useful to prevent a single account from filling storage."`
RejectsMailbox string `sconf:"optional" sconf-doc:"Mail that looks like spam will be rejected, but a copy can be stored temporarily in a mailbox, e.g. Rejects. If mail isn't coming in when you expect, you can look there. The mail still isn't accepted, so the remote mail server may retry (hopefully, if legitimate), or give up (hopefully, if indeed a spammer). Messages are automatically removed from this mailbox, so do not set it to a mailbox that has messages you want to keep."`
KeepRejects bool `sconf:"optional" sconf-doc:"Don't automatically delete mail in the RejectsMailbox listed above. This can be useful, e.g. for future spam training. It can also cause storage to fill up."`
AutomaticJunkFlags AutomaticJunkFlags `sconf:"optional" sconf-doc:"Automatically set $Junk and $NotJunk flags based on mailbox messages are delivered/moved/copied to. Email clients typically have too limited functionality to conveniently set these flags, especially $NonJunk, but they can all move messages to a different mailbox, so this helps them."`
JunkFilter *JunkFilter `sconf:"optional" sconf-doc:"Content-based filtering, using the junk-status of individual messages to rank words in such messages as spam or ham. It is recommended you always set the applicable (non)-junk status on messages, and that you do not empty your Trash because those messages contain valuable ham/spam training information."` // todo: sane defaults for junkfilter
MaxOutgoingMessagesPerDay int `sconf:"optional" sconf-doc:"Maximum number of outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 1000."`
MaxFirstTimeRecipientsPerDay int `sconf:"optional" sconf-doc:"Maximum number of first-time recipients in outgoing messages for this account in a 24 hour window. This limits the damage to recipients and the reputation of this mail server in case of account compromise. Default 200."`
NoFirstTimeSenderDelay bool `sconf:"optional" sconf-doc:"Do not apply a delay to SMTP connections before accepting an incoming message from a first-time sender. Can be useful for accounts that send automated responses and want instant replies."`
NoCustomPassword bool `sconf:"optional" sconf-doc:"If set, this account cannot set a password of their own choice, but can only set a new randomly generated password, preventing password reuse across services and use of weak passwords. Custom account passwords can be set by the admin."`
Routes []Route `sconf:"optional" sconf-doc:"Routes for delivering outgoing messages through the queue. Each delivery attempt evaluates these account routes, domain routes and finally global routes. The transport of the first matching route is used in the delivery attempt. If no routes match, which is the default with no configured routes, messages are delivered directly from the queue."`
DNSDomain dns.Domain `sconf:"-"` // Parsed form of Domain.
JunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
NeutralMailbox *regexp.Regexp `sconf:"-" json:"-"`
NotJunkMailbox *regexp.Regexp `sconf:"-" json:"-"`
ParsedFromIDLoginAddresses []smtp.Address `sconf:"-" json:"-"`
Aliases []AddressAlias `sconf:"-"`
}
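
Illustrative sketch (not part of the diff above): a minimal account with one explicit address and a catchall for the domain, a Rejects mailbox for likely spam, and a conservative sending limit. Account, domain and mailbox names, and the import path github.com/mjl-/mox/config, are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

// The explicit address delivers to Inbox (the default); all other addresses
// at the domain land in a Catchall mailbox.
var account = config.Account{
	Domain:   "example.org",
	FullName: "Alice Example",
	Destinations: map[string]config.Destination{
		"alice@example.org": {},
		"@example.org":      {Mailbox: "Catchall"},
	},
	RejectsMailbox:            "Rejects",
	MaxOutgoingMessagesPerDay: 200,
}

func main() {}
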
type AddressAlias struct {
SubscriptionAddress string
Alias Alias // Without members.
MemberAddresses []string // Only if allowed to see.
} }
type JunkFilter struct { type JunkFilter struct {
@ -473,19 +259,11 @@ type JunkFilter struct {
} }
type Destination struct { type Destination struct {
Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."` Mailbox string `sconf:"optional" sconf-doc:"Mailbox to deliver to if none of Rulesets match. Default: Inbox."`
Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."` Rulesets []Ruleset `sconf:"optional" sconf-doc:"Delivery rules based on message and SMTP transaction. You may want to match each mailing list by SMTP MailFrom address, VerifiedDomain and/or List-ID header (typically <listname.example.org> if the list address is listname@example.org), delivering them to their own mailbox."`
SMTPError string `sconf:"optional" sconf-doc:"If non-empty, incoming delivery attempts to this destination will be rejected during SMTP RCPT TO with this error response line. Useful when a catchall address is configured for the domain and messages to some addresses should be rejected. The response line must start with an error code. Currently the following error response codes are allowed: 421 (temporary local error), 550 (user not found). If the line consists of only an error code, an appropriate error message is added. Rejecting messages with a 4xx code invites later retries by the remote, while 5xx codes should prevent further delivery attempts."`
MessageAuthRequiredSMTPError string `sconf:"optional" sconf-doc:"If non-empty, an additional DMARC-like message authentication check is done for incoming messages, validating the domain in the From-header of the message. Messages without either an aligned SPF or aligned DKIM pass are rejected during the SMTP DATA command with a permanent error code followed by the message in this field. The domain in the message 'From' header is matched in relaxed or strict mode according to the domain's DMARC policy if present, or relaxed mode (organizational instead of exact domain match) otherwise. Useful for autoresponders that don't want to accept messages they don't want to send an automated reply to."`
FullName string `sconf:"optional" sconf-doc:"Full name to use in message From header when composing messages coming from this address with webmail."`
DMARCReports bool `sconf:"-" json:"-"` DMARCReports bool `sconf:"-" json:"-"`
HostTLSReports bool `sconf:"-" json:"-"` TLSReports bool `sconf:"-" json:"-"`
DomainTLSReports bool `sconf:"-" json:"-"`
// Ready to use in SMTP responses.
SMTPErrorCode int `sconf:"-" json:"-"`
SMTPErrorSecode string `sconf:"-" json:"-"`
SMTPErrorMsg string `sconf:"-" json:"-"`
} }
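
Illustrative sketch (not part of the diff above): a Destination with a Ruleset that delivers a mailing list to its own mailbox by matching the List-ID header, as suggested in the Rulesets documentation. The list address, mailbox name and the import path github.com/mjl-/mox/config are hypothetical.

package main

import "github.com/mjl-/mox/config" // assumed import path

// Messages whose List-ID header matches the list identifier go to Lists/mox;
// ListAllowDomain relaxes spam checks for verified mail from the list domain.
var listDestination = config.Destination{
	Rulesets: []config.Ruleset{
		{
			HeadersRegexp: map[string]string{
				"^list-id$": `<mox\.lists\.example\.org>`,
			},
			ListAllowDomain: "lists.example.org",
			Mailbox:         "Lists/mox",
		},
	},
}

func main() {}
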
// Equal returns whether d and o are equal, only looking at their user-changeable fields. // Equal returns whether d and o are equal, only looking at their user-changeable fields.
@ -502,22 +280,16 @@ func (d Destination) Equal(o Destination) bool {
} }
type Ruleset struct { type Ruleset struct {
SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. '^user@example\\.org$'."` SMTPMailFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. user@example.org."`
MsgFromRegexp string `sconf:"optional" sconf-doc:"Matches if this regular expression matches (a substring of) the single address in the message From header."`
VerifiedDomain string `sconf:"optional" sconf-doc:"Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain."` VerifiedDomain string `sconf:"optional" sconf-doc:"Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain."`
HeadersRegexp map[string]string `sconf:"optional" sconf-doc:"Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>."` HeadersRegexp map[string]string `sconf:"optional" sconf-doc:"Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message, only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>."`
// todo: add a SMTPRcptTo check // todo: add a SMTPRcptTo check, and MessageFrom that works on a properly parsed From header.
// todo: once we implement ARC, we can use dkim domains that we cannot verify but that the arc-verified forwarding mail server was able to verify. ListAllowDomain string `sconf:"optional" sconf-doc:"Influence the spam filtering, this does not change whether this ruleset applies to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."`
IsForward bool `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. Can only be used together with SMTPMailFromRegexp and VerifiedDomain. SMTPMailFromRegexp must be set to the address used to deliver the forwarded message, e.g. '^user(|\\+.*)@forward\\.example$'. Changes to junk analysis: 1. Messages are not rejected for failing a DMARC policy, because a legitimate forwarded message without valid/intact/aligned DKIM signature would be rejected because any verified SPF domain will be 'unaligned', of the forwarding mail server. 2. The sending mail server IP address, and sending EHLO and MAIL FROM domains and matching DKIM domain aren't used in future reputation-based spam classifications (but other verified DKIM domains are) because the forwarding server is not a useful spam signal for future messages."`
ListAllowDomain string `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."`
AcceptRejectsToMailbox string `sconf:"optional" sconf-doc:"Influences spam filtering only, this option does not change whether a message matches this ruleset. If a message is classified as spam, it isn't rejected during the SMTP transaction (the normal behaviour), but accepted during the SMTP transaction and delivered to the specified mailbox. The specified mailbox is not automatically cleaned up like the account global Rejects mailbox, unless set to that Rejects mailbox."`
Mailbox string `sconf-doc:"Mailbox to deliver to if this ruleset matches."` Mailbox string `sconf-doc:"Mailbox to deliver to if this ruleset matches."`
Comment string `sconf:"optional" sconf-doc:"Free-form comments."`
SMTPMailFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"` SMTPMailFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
MsgFromRegexpCompiled *regexp.Regexp `sconf:"-" json:"-"`
VerifiedDNSDomain dns.Domain `sconf:"-"` VerifiedDNSDomain dns.Domain `sconf:"-"`
HeadersRegexpCompiled [][2]*regexp.Regexp `sconf:"-" json:"-"` HeadersRegexpCompiled [][2]*regexp.Regexp `sconf:"-" json:"-"`
ListAllowDNSDomain dns.Domain `sconf:"-"` ListAllowDNSDomain dns.Domain `sconf:"-"`
@ -525,7 +297,7 @@ type Ruleset struct {
// Equal returns whether r and o are equal, only looking at their user-changeable fields. // Equal returns whether r and o are equal, only looking at their user-changeable fields.
func (r Ruleset) Equal(o Ruleset) bool { func (r Ruleset) Equal(o Ruleset) bool {
if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.MsgFromRegexp != o.MsgFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.IsForward != o.IsForward || r.ListAllowDomain != o.ListAllowDomain || r.AcceptRejectsToMailbox != o.AcceptRejectsToMailbox || r.Mailbox != o.Mailbox || r.Comment != o.Comment { if r.SMTPMailFromRegexp != o.SMTPMailFromRegexp || r.VerifiedDomain != o.VerifiedDomain || r.ListAllowDomain != o.ListAllowDomain || r.Mailbox != o.Mailbox {
return false return false
} }
if !reflect.DeepEqual(r.HeadersRegexp, o.HeadersRegexp) { if !reflect.DeepEqual(r.HeadersRegexp, o.HeadersRegexp) {
@ -540,31 +312,22 @@ type KeyCert struct {
} }
type TLS struct { type TLS struct {
ACME string `sconf:"optional" sconf-doc:"Name of provider from top-level configuration to use for ACME, e.g. letsencrypt."` ACME string `sconf:"optional" sconf-doc:"Name of provider from top-level configuration to use for ACME, e.g. letsencrypt."`
KeyCerts []KeyCert `sconf:"optional" sconf-doc:"Keys and certificates to use for this listener. The files are opened by the privileged root process and passed to the unprivileged mox process, so no special permissions are required on the files. If the private key will not be replaced when refreshing certificates, also consider adding the private key to HostPrivateKeyFiles and configuring DANE TLSA DNS records."` KeyCerts []KeyCert `sconf:"optional" sconf-doc:"Key and certificate files are opened by the privileged root process and passed to the unprivileged mox process, so no special permissions are required."`
MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."` MinVersion string `sconf:"optional" sconf-doc:"Minimum TLS version. Default: TLSv1.2."`
HostPrivateKeyFiles []string `sconf:"optional" sconf-doc:"Private keys used for ACME certificates. Specified explicitly so DANE TLSA DNS records can be generated, even before the certificates are requested. DANE is a mechanism to authenticate remote TLS certificates based on a public key or certificate specified in DNS, protected with DNSSEC. DANE is opportunistic and attempted when delivering SMTP with STARTTLS. The private key files must be in PEM format. PKCS8 is recommended, but PKCS1 and EC private keys are recognized as well. Only RSA 2048 bit and ECDSA P-256 keys are currently used. The first of each is used when requesting new certificates through ACME."`
ClientAuthDisabled bool `sconf:"optional" sconf-doc:"Disable TLS client authentication with certificates/keys, preventing the TLS server from requesting a TLS certificate from clients. Useful for working around clients that don't handle TLS client authentication well."`
Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443. Connections without SNI will use a certificate for the hostname of the listener, connections with an SNI hostname that isn't allowed will be rejected. Config *tls.Config `sconf:"-" json:"-"` // TLS config for non-ACME-verification connections, i.e. SMTP and IMAP, and not port 443.
ConfigFallback *tls.Config `sconf:"-" json:"-"` // Like Config, but uses the certificate for the listener hostname when the requested SNI hostname is not allowed, instead of causing the connection to fail. ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
ACMEConfig *tls.Config `sconf:"-" json:"-"` // TLS config that handles ACME verification, for serving on port 443.
HostPrivateRSA2048Keys []crypto.Signer `sconf:"-" json:"-"` // Private keys for new TLS certificates for listener host name, for new certificates with ACME, and for DANE records.
HostPrivateECDSAP256Keys []crypto.Signer `sconf:"-" json:"-"`
} }
// todo: we could implement matching WebHandler.Domain as IPs too
type WebHandler struct { type WebHandler struct {
LogName string `sconf:"optional" sconf-doc:"Name to use in logging and metrics."` LogName string `sconf:"optional" sconf-doc:"Name to use in logging and metrics."`
Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward, WebInternal must be set."` Domain string `sconf-doc:"Both Domain and PathRegexp must match for this WebHandler to match a request. Exactly one of WebStatic, WebRedirect, WebForward must be set."`
PathRegexp string `sconf-doc:"Regular expression matched against request path, must always start with ^ to ensure matching from the start of the path. The matching prefix can optionally be stripped by WebForward. The regular expression does not have to end with $."` PathRegexp string `sconf-doc:"Regular expression matched against request path, must always start with ^ to ensure matching from the start of the path. The matching prefix can optionally be stripped by WebForward. The regular expression does not have to end with $."`
DontRedirectPlainHTTP bool `sconf:"optional" sconf-doc:"If set, plain HTTP requests are not automatically permanently redirected (308) to HTTPS. If you don't have a HTTPS webserver configured, set this to true."` DontRedirectPlainHTTP bool `sconf:"optional" sconf-doc:"If set, plain HTTP requests are not automatically permanently redirected (308) to HTTPS. If you don't have a HTTPS webserver configured, set this to true."`
Compress bool `sconf:"optional" sconf-doc:"Transparently compress responses (currently with gzip) if the client supports it, the status is 200 OK, no Content-Encoding is set on the response yet and the Content-Type of the response hints that the data is compressible (text/..., specific application/... and .../...+json and .../...+xml). For static files only, a cache with compressed files is kept."`
WebStatic *WebStatic `sconf:"optional" sconf-doc:"Serve static files."` WebStatic *WebStatic `sconf:"optional" sconf-doc:"Serve static files."`
WebRedirect *WebRedirect `sconf:"optional" sconf-doc:"Redirect requests to configured URL."` WebRedirect *WebRedirect `sconf:"optional" sconf-doc:"Redirect requests to configured URL."`
WebForward *WebForward `sconf:"optional" sconf-doc:"Forward requests to another webserver, i.e. reverse proxy."` WebForward *WebForward `sconf:"optional" sconf-doc:"Forward requests to another webserver, i.e. reverse proxy."`
WebInternal *WebInternal `sconf:"optional" sconf-doc:"Pass request to internal service, like webmail, webapi, etc."`
Name string `sconf:"-"` // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics. Name string `sconf:"-"` // Either LogName, or numeric index if LogName was empty. Used instead of LogName in logging/metrics.
DNSDomain dns.Domain `sconf:"-"` DNSDomain dns.Domain `sconf:"-"`
@ -580,7 +343,6 @@ func (wh WebHandler) Equal(o WebHandler) bool {
x.WebStatic = nil x.WebStatic = nil
x.WebRedirect = nil x.WebRedirect = nil
x.WebForward = nil x.WebForward = nil
x.WebInternal = nil
return x return x
} }
cwh := clean(wh) cwh := clean(wh)
@ -588,7 +350,7 @@ func (wh WebHandler) Equal(o WebHandler) bool {
if cwh != co { if cwh != co {
return false return false
} }
if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) || (wh.WebInternal == nil) != (o.WebInternal == nil) { if (wh.WebStatic == nil) != (o.WebStatic == nil) || (wh.WebRedirect == nil) != (o.WebRedirect == nil) || (wh.WebForward == nil) != (o.WebForward == nil) {
return false return false
} }
if wh.WebStatic != nil { if wh.WebStatic != nil {
@ -600,9 +362,6 @@ func (wh WebHandler) Equal(o WebHandler) bool {
if wh.WebForward != nil { if wh.WebForward != nil {
return wh.WebForward.equal(*o.WebForward) return wh.WebForward.equal(*o.WebForward)
} }
if wh.WebInternal != nil {
return wh.WebInternal.equal(*o.WebInternal)
}
return true return true
} }
@ -645,16 +404,3 @@ func (wf WebForward) equal(o WebForward) bool {
o.TargetURL = nil o.TargetURL = nil
return reflect.DeepEqual(wf, o) return reflect.DeepEqual(wf, o)
} }
type WebInternal struct {
BasePath string `sconf-doc:"Path to use as root of internal service, e.g. /webmail/."`
Service string `sconf-doc:"Name of the service, values: admin, account, webmail, webapi."`
Handler http.Handler `sconf:"-" json:"-"`
}
func (wi WebInternal) equal(o WebInternal) bool {
wi.Handler = nil
o.Handler = nil
return reflect.DeepEqual(wi, o)
}
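// Illustration, not part of the original file: a sketch of a WebHandler that serves
// the internal webmail service under /webmail/ for one domain. The values are
// hypothetical; in a real setup handlers come from the web handler configuration.
var exampleWebmailHandler = WebHandler{
	LogName:     "webmail",
	Domain:      "mail.example.org",
	PathRegexp:  "^/webmail/",
	WebInternal: &WebInternal{BasePath: "/webmail/", Service: "webmail"},
}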

File diff suppressed because it is too large

1566 ctl.go

File diff suppressed because it is too large

View File

@ -1,558 +0,0 @@
//go:build !integration
package main
import (
"context"
"crypto/ed25519"
cryptorand "crypto/rand"
"crypto/x509"
"flag"
"fmt"
"log/slog"
"math/big"
"net"
"os"
"path/filepath"
"testing"
"time"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dmarcdb"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/imapclient"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/mtastsdb"
"github.com/mjl-/mox/queue"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/store"
"github.com/mjl-/mox/tlsrptdb"
)
var ctxbg = context.Background()
var pkglog = mlog.New("ctl", nil)
func tcheck(t *testing.T, err error, errmsg string) {
if err != nil {
t.Helper()
t.Fatalf("%s: %v", errmsg, err)
}
}
// TestCtl executes commands through ctl. This tests at least the protocol (who
// sends what, and when). We often don't check the actual results, but
// unhandled errors would cause a panic.
func TestCtl(t *testing.T) {
os.RemoveAll("testdata/ctl/data")
mox.ConfigStaticPath = filepath.FromSlash("testdata/ctl/config/mox.conf")
mox.ConfigDynamicPath = filepath.FromSlash("testdata/ctl/config/domains.conf")
if errs := mox.LoadConfig(ctxbg, pkglog, true, false); len(errs) > 0 {
t.Fatalf("loading mox config: %v", errs)
}
err := store.Init(ctxbg)
tcheck(t, err, "store init")
defer store.Close()
defer store.Switchboard()()
err = queue.Init()
tcheck(t, err, "queue init")
defer queue.Shutdown()
var cid int64
testctl := func(fn func(clientxctl *ctl)) {
t.Helper()
cconn, sconn := net.Pipe()
clientxctl := ctl{conn: cconn, log: pkglog}
serverxctl := ctl{conn: sconn, log: pkglog}
done := make(chan struct{})
go func() {
cid++
servectlcmd(ctxbg, &serverxctl, cid, func() {})
close(done)
}()
fn(&clientxctl)
cconn.Close()
<-done
sconn.Close()
}
// "deliver"
testctl(func(xctl *ctl) {
ctlcmdDeliver(xctl, "mjl@mox.example")
})
// "setaccountpassword"
testctl(func(xctl *ctl) {
ctlcmdSetaccountpassword(xctl, "mjl", "test4321")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesList(xctl)
})
// All messages.
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "", "", "")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "", "☺.mox.example", "")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "mox", "☺.mox.example", "example.com")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesRemove(xctl, 1)
})
// Queue a message to list/change/dump.
msg := "Subject: subject\r\n\r\nbody\r\n"
msgFile, err := store.CreateMessageTemp(pkglog, "queuedump-test")
tcheck(t, err, "temp file")
_, err = msgFile.Write([]byte(msg))
tcheck(t, err, "write message")
_, err = msgFile.Seek(0, 0)
tcheck(t, err, "rewind message")
defer os.Remove(msgFile.Name())
defer msgFile.Close()
addr, err := smtp.ParseAddress("mjl@mox.example")
tcheck(t, err, "parse address")
qml := []queue.Msg{queue.MakeMsg(addr.Path(), addr.Path(), false, false, int64(len(msg)), "<random@localhost>", nil, nil, time.Now(), "subject")}
queue.Add(ctxbg, pkglog, "mjl", msgFile, qml...)
qmid := qml[0].ID
// Has entries now.
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesList(xctl)
})
// "queuelist"
testctl(func(xctl *ctl) {
ctlcmdQueueList(xctl, queue.Filter{}, queue.Sort{})
})
// "queueholdset"
testctl(func(xctl *ctl) {
ctlcmdQueueHoldSet(xctl, queue.Filter{}, true)
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldSet(xctl, queue.Filter{}, false)
})
// "queueschedule"
testctl(func(xctl *ctl) {
ctlcmdQueueSchedule(xctl, queue.Filter{}, true, time.Minute)
})
// "queuetransport"
testctl(func(xctl *ctl) {
ctlcmdQueueTransport(xctl, queue.Filter{}, "socks")
})
// "queuerequiretls"
testctl(func(xctl *ctl) {
ctlcmdQueueRequireTLS(xctl, queue.Filter{}, nil)
})
// "queuedump"
testctl(func(xctl *ctl) {
ctlcmdQueueDump(xctl, fmt.Sprintf("%d", qmid))
})
// "queuefail"
testctl(func(xctl *ctl) {
ctlcmdQueueFail(xctl, queue.Filter{})
})
// "queuedrop"
testctl(func(xctl *ctl) {
ctlcmdQueueDrop(xctl, queue.Filter{})
})
// "queueholdruleslist"
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesList(xctl)
})
// "queueholdrulesadd"
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "", "")
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesAdd(xctl, "mjl", "localhost", "")
})
// "queueholdrulesremove"
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesRemove(xctl, 2)
})
testctl(func(xctl *ctl) {
ctlcmdQueueHoldrulesList(xctl)
})
// "queuesuppresslist"
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressList(xctl, "mjl")
})
// "queuesuppressadd"
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressAdd(xctl, "mjl", "base@localhost")
})
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressAdd(xctl, "mjl", "other@localhost")
})
// "queuesuppresslookup"
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressLookup(xctl, "mjl", "base@localhost")
})
// "queuesuppressremove"
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressRemove(xctl, "mjl", "base@localhost")
})
testctl(func(xctl *ctl) {
ctlcmdQueueSuppressList(xctl, "mjl")
})
// "queueretiredlist"
testctl(func(xctl *ctl) {
ctlcmdQueueRetiredList(xctl, queue.RetiredFilter{}, queue.RetiredSort{})
})
// "queueretiredprint"
testctl(func(xctl *ctl) {
ctlcmdQueueRetiredPrint(xctl, "1")
})
// "queuehooklist"
testctl(func(xctl *ctl) {
ctlcmdQueueHookList(xctl, queue.HookFilter{}, queue.HookSort{})
})
// "queuehookschedule"
testctl(func(xctl *ctl) {
ctlcmdQueueHookSchedule(xctl, queue.HookFilter{}, true, time.Minute)
})
// "queuehookprint"
testctl(func(xctl *ctl) {
ctlcmdQueueHookPrint(xctl, "1")
})
// "queuehookcancel"
testctl(func(xctl *ctl) {
ctlcmdQueueHookCancel(xctl, queue.HookFilter{})
})
// "queuehookretiredlist"
testctl(func(xctl *ctl) {
ctlcmdQueueHookRetiredList(xctl, queue.HookRetiredFilter{}, queue.HookRetiredSort{})
})
// "queuehookretiredprint"
testctl(func(xctl *ctl) {
ctlcmdQueueHookRetiredPrint(xctl, "1")
})
// "importmbox"
testctl(func(xctl *ctl) {
ctlcmdImport(xctl, true, "mjl", "inbox", "testdata/importtest.mbox")
})
// "importmaildir"
testctl(func(xctl *ctl) {
ctlcmdImport(xctl, false, "mjl", "inbox", "testdata/importtest.maildir")
})
// "domainadd"
testctl(func(xctl *ctl) {
ctlcmdConfigDomainAdd(xctl, false, dns.Domain{ASCII: "mox2.example"}, "mjl", "")
})
// "accountadd"
testctl(func(xctl *ctl) {
ctlcmdConfigAccountAdd(xctl, "mjl2", "mjl2@mox2.example")
})
// "addressadd"
testctl(func(xctl *ctl) {
ctlcmdConfigAddressAdd(xctl, "mjl3@mox2.example", "mjl2")
})
// Add a message.
testctl(func(xctl *ctl) {
ctlcmdDeliver(xctl, "mjl3@mox2.example")
})
// "retrain", retrain junk filter.
testctl(func(xctl *ctl) {
ctlcmdRetrain(xctl, "mjl2")
})
// "addressrm"
testctl(func(xctl *ctl) {
ctlcmdConfigAddressRemove(xctl, "mjl3@mox2.example")
})
// "accountdisabled"
testctl(func(xctl *ctl) {
ctlcmdConfigAccountDisabled(xctl, "mjl2", "testing")
})
// "accountlist"
testctl(func(xctl *ctl) {
ctlcmdConfigAccountList(xctl)
})
testctl(func(xctl *ctl) {
ctlcmdConfigAccountDisabled(xctl, "mjl2", "")
})
// "accountrm"
testctl(func(xctl *ctl) {
ctlcmdConfigAccountRemove(xctl, "mjl2")
})
// "domaindisabled"
testctl(func(xctl *ctl) {
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, true)
})
testctl(func(xctl *ctl) {
ctlcmdConfigDomainDisabled(xctl, dns.Domain{ASCII: "mox2.example"}, false)
})
// "domainrm"
testctl(func(xctl *ctl) {
ctlcmdConfigDomainRemove(xctl, dns.Domain{ASCII: "mox2.example"})
})
// "aliasadd"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasAdd(xctl, "support@mox.example", config.Alias{Addresses: []string{"mjl@mox.example"}})
})
// "aliaslist"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasList(xctl, "mox.example")
})
// "aliasprint"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasPrint(xctl, "support@mox.example")
})
// "aliasupdate"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasUpdate(xctl, "support@mox.example", "true", "true", "true")
})
// "aliasaddaddr"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasAddaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
})
// "aliasrmaddr"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasRmaddr(xctl, "support@mox.example", []string{"mjl2@mox.example"})
})
// "aliasrm"
testctl(func(xctl *ctl) {
ctlcmdConfigAliasRemove(xctl, "support@mox.example")
})
// accounttlspubkeyadd
certDER := fakeCert(t)
testctl(func(xctl *ctl) {
ctlcmdConfigTlspubkeyAdd(xctl, "mjl@mox.example", "testkey", false, certDER)
})
// "accounttlspubkeylist"
testctl(func(xctl *ctl) {
ctlcmdConfigTlspubkeyList(xctl, "")
})
testctl(func(xctl *ctl) {
ctlcmdConfigTlspubkeyList(xctl, "mjl")
})
tpkl, err := store.TLSPublicKeyList(ctxbg, "")
tcheck(t, err, "list tls public keys")
if len(tpkl) != 1 {
t.Fatalf("got %d tls public keys, expected 1", len(tpkl))
}
fingerprint := tpkl[0].Fingerprint
// "accounttlspubkeyget"
testctl(func(xctl *ctl) {
ctlcmdConfigTlspubkeyGet(xctl, fingerprint)
})
// "accounttlspubkeyrm"
testctl(func(xctl *ctl) {
ctlcmdConfigTlspubkeyRemove(xctl, fingerprint)
})
tpkl, err = store.TLSPublicKeyList(ctxbg, "")
tcheck(t, err, "list tls public keys")
if len(tpkl) != 0 {
t.Fatalf("got %d tls public keys, expected 0", len(tpkl))
}
// "loglevels"
testctl(func(xctl *ctl) {
ctlcmdLoglevels(xctl)
})
// "setloglevels"
testctl(func(xctl *ctl) {
ctlcmdSetLoglevels(xctl, "", "debug")
})
testctl(func(xctl *ctl) {
ctlcmdSetLoglevels(xctl, "smtpserver", "debug")
})
// Export data, import it again
xcmdExport(true, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
xcmdExport(false, false, []string{filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/"), filepath.FromSlash("testdata/ctl/data/accounts/mjl")}, &cmd{log: pkglog})
testctl(func(xctl *ctl) {
ctlcmdImport(xctl, true, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/mbox/Inbox.mbox"))
})
testctl(func(xctl *ctl) {
ctlcmdImport(xctl, false, "mjl", "inbox", filepath.FromSlash("testdata/ctl/data/tmp/export/maildir/Inbox"))
})
// "recalculatemailboxcounts"
testctl(func(xctl *ctl) {
ctlcmdRecalculateMailboxCounts(xctl, "mjl")
})
// "fixmsgsize"
testctl(func(xctl *ctl) {
ctlcmdFixmsgsize(xctl, "mjl")
})
testctl(func(xctl *ctl) {
acc, err := store.OpenAccount(xctl.log, "mjl", false)
tcheck(t, err, "open account")
defer func() {
acc.Close()
acc.WaitClosed()
}()
content := []byte("Subject: hi\r\n\r\nbody\r\n")
deliver := func(m *store.Message) {
t.Helper()
m.Size = int64(len(content))
msgf, err := store.CreateMessageTemp(xctl.log, "ctltest")
tcheck(t, err, "create temp file")
defer os.Remove(msgf.Name())
defer msgf.Close()
_, err = msgf.Write(content)
tcheck(t, err, "write message file")
acc.WithWLock(func() {
err = acc.DeliverMailbox(xctl.log, "Inbox", m, msgf)
tcheck(t, err, "deliver message")
})
}
var msgBadSize store.Message
deliver(&msgBadSize)
msgBadSize.Size = 1
err = acc.DB.Update(ctxbg, &msgBadSize)
tcheck(t, err, "update message to bad size")
mb := store.Mailbox{ID: msgBadSize.MailboxID}
err = acc.DB.Get(ctxbg, &mb)
tcheck(t, err, "get db")
mb.Size -= int64(len(content))
mb.Size += 1
err = acc.DB.Update(ctxbg, &mb)
tcheck(t, err, "update mailbox size")
// Fix up the size.
ctlcmdFixmsgsize(xctl, "")
err = acc.DB.Get(ctxbg, &msgBadSize)
tcheck(t, err, "get message")
if msgBadSize.Size != int64(len(content)) {
t.Fatalf("after fixing, message size is %d, should be %d", msgBadSize.Size, len(content))
}
})
// "reparse"
testctl(func(xctl *ctl) {
ctlcmdReparse(xctl, "mjl")
})
testctl(func(xctl *ctl) {
ctlcmdReparse(xctl, "")
})
// "reassignthreads"
testctl(func(xctl *ctl) {
ctlcmdReassignthreads(xctl, "mjl")
})
testctl(func(xctl *ctl) {
ctlcmdReassignthreads(xctl, "")
})
// "backup", backup account.
err = dmarcdb.Init()
tcheck(t, err, "dmarcdb init")
defer dmarcdb.Close()
err = mtastsdb.Init(false)
tcheck(t, err, "mtastsdb init")
defer mtastsdb.Close()
err = tlsrptdb.Init()
tcheck(t, err, "tlsrptdb init")
defer tlsrptdb.Close()
testctl(func(xctl *ctl) {
os.RemoveAll("testdata/ctl/data/tmp/backup")
err := os.WriteFile("testdata/ctl/data/receivedid.key", make([]byte, 16), 0600)
tcheck(t, err, "writing receivedid.key")
ctlcmdBackup(xctl, filepath.FromSlash("testdata/ctl/data/tmp/backup"), false)
})
// Verify the backup.
xcmd := cmd{
flag: flag.NewFlagSet("", flag.ExitOnError),
flagArgs: []string{filepath.FromSlash("testdata/ctl/data/tmp/backup/data")},
}
cmdVerifydata(&xcmd)
// IMAP connection.
testctl(func(xctl *ctl) {
a, b := net.Pipe()
go func() {
opts := imapclient.Opts{
Logger: slog.Default().With("cid", mox.Cid()),
Error: func(err error) { panic(err) },
}
client, err := imapclient.New(a, &opts)
tcheck(t, err, "new imapclient")
client.Select("inbox")
client.Logout()
defer a.Close()
}()
ctlcmdIMAPServe(xctl, "mjl@mox.example", b, b)
})
}
func fakeCert(t *testing.T) []byte {
t.Helper()
seed := make([]byte, ed25519.SeedSize)
privKey := ed25519.NewKeyFromSeed(seed) // Fake key, don't use this for real!
template := &x509.Certificate{
SerialNumber: big.NewInt(1), // Required field...
}
localCertBuf, err := x509.CreateCertificate(cryptorand.Reader, template, template, privKey.Public(), privKey)
tcheck(t, err, "making certificate")
return localCertBuf
}

View File

@ -1,14 +0,0 @@
//go:build !go1.24
package main
import (
"crypto/tls"
)
var curvesList = []tls.CurveID{
tls.CurveP256,
tls.CurveP384,
tls.CurveP521,
tls.X25519,
}

View File

@ -1,15 +0,0 @@
//go:build go1.24
package main
import (
"crypto/tls"
)
var curvesList = []tls.CurveID{
tls.CurveP256,
tls.CurveP384,
tls.CurveP521,
tls.X25519,
tls.X25519MLKEM768,
}

View File

@ -1,516 +0,0 @@
// Package dane verifies TLS certificates through DNSSEC-verified TLSA records.
//
// On the internet, TLS certificates are commonly verified by checking if they are
// signed by one of many commonly trusted Certificate Authorities (CAs). This is
// PKIX or WebPKI. With DANE, TLS certificates are verified through
// DNSSEC-protected DNS records of type TLSA. These TLSA records specify the rules
// for verification ("usage") and whether a full certificate ("selector" cert) is
// checked or only its "subject public key info" ("selector" spki). The (hash of)
// the certificate or "spki" is included in the TLSA record ("matchtype").
//
// DANE SMTP connections have two allowed "usages" (verification rules):
// - DANE-EE, which only checks if the certificate or spki match, without the
// WebPKI verification of expiration, name or signed-by-trusted-party verification.
// - DANE-TA, which does verification similar to PKIX/WebPKI, but verifies against
// a certificate authority ("trust anchor", or "TA") specified in the TLSA record
// instead of the CA pool.
//
// DANE has two more "usages", that may be used with protocols other than SMTP:
// - PKIX-EE, which matches the certificate or spki, and also verifies the
// certificate against the CA pool.
// - PKIX-TA, which verifies the certificate or spki against a "trust anchor"
// specified in the TLSA record, that also has to be trusted by the CA pool.
//
// TLSA records are looked up for a specific port number, protocol (tcp/udp) and
// host name. Each port can have different TLSA records. TLSA records must be
// signed and verified with DNSSEC before they can be trusted and used.
//
// TLSA records are looked up under "TLSA candidate base domains". The domain
// where the TLSA records are found is the "TLSA base domain". If the host to
// connect to is a CNAME that can be followed with DNSSEC protection, it is the
// first TLSA candidate base domain. If no protected records are found, the
// original host name is the second TLSA candidate base domain.
//
// For TLS connections, the TLSA base domain is used with SNI during the
// handshake.
//
// For TLS certificate verification that requires PKIX/WebPKI/trusted-anchor
// verification (all except DANE-EE), the potential second TLSA candidate base
// domain name is also a valid hostname. With SMTP, additionally for hosts found in
// MX records for a "next-hop domain", the "original next-hop domain" (domain of an
// email address to deliver to) is also a valid name, as is the "CNAME-expanded
// original next-hop domain", bringing the potential total allowed names to four
// (if CNAMEs are followed for the MX hosts).
package dane
// todo: why is https://datatracker.ietf.org/doc/html/draft-barnes-dane-uks-00 not in use? sounds reasonable.
// todo: add a DialSRV function that accepts a domain name, looks up srv records, dials the service, verifies dane certificate and returns the connection. for ../rfc/7673
import (
"bytes"
"context"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"log/slog"
"net"
"strings"
"time"
"github.com/mjl-/adns"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/stub"
"slices"
)
var (
MetricVerify stub.Counter = stub.CounterIgnore{}
MetricVerifyErrors stub.Counter = stub.CounterIgnore{}
)
var (
// ErrNoRecords means no TLSA records were found and host has not opted into DANE.
ErrNoRecords = errors.New("dane: no tlsa records")
// ErrInsecure indicates insecure DNS responses were encountered while looking up
// the host, CNAME records, or TLSA records.
ErrInsecure = errors.New("dane: dns lookups insecure")
// ErrNoMatch means some TLSA records were found, but none can be verified against
// the remote TLS certificate.
ErrNoMatch = errors.New("dane: no match between certificate and tlsa records")
)
// VerifyError is an error encountered while verifying a DANE TLSA record. For
// example, an error encountered with x509 certificate trusted-anchor verification.
// A TLSA record that does not match a TLS certificate is not a VerifyError.
type VerifyError struct {
Err error // Underlying error, possibly from crypto/x509.
Record adns.TLSA // Cause of error.
}
// Error returns a string explaining this is a dane verify error along with the
// underlying error.
func (e VerifyError) Error() string {
return fmt.Sprintf("dane verify error: %s", e.Err)
}
// Unwrap returns the underlying error.
func (e VerifyError) Unwrap() error {
return e.Err
}
// Dial looks up DNSSEC-protected DANE TLSA records for the domain name and
// port/service in address, checks for allowed usages, makes a network connection
// and verifies the remote certificate against the TLSA records. If verification
// succeeds, the verified record is returned.
//
// Different protocols require different usages. For example, SMTP with STARTTLS
// for delivery only allows usages DANE-TA and DANE-EE. If allowedUsages is
// non-nil, only the specified usages are taken into account when verifying, and
// any others ignored.
//
// Errors that can be returned, possibly in wrapped form:
// - ErrNoRecords, also in case the DNS response indicates "not found".
// - adns.DNSError, potentially wrapping adns.ExtendedError of which some can
// indicate DNSSEC errors.
// - ErrInsecure
// - VerifyError, potentially wrapping errors from crypto/x509.
func Dial(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, network, address string, allowedUsages []adns.TLSAUsage, pkixRoots *x509.CertPool) (net.Conn, adns.TLSA, error) {
log := mlog.New("dane", elog)
// Split host and port.
host, portstr, err := net.SplitHostPort(address)
if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("parsing address: %w", err)
}
port, err := resolver.LookupPort(ctx, network, portstr)
if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("parsing port: %w", err)
}
hostDom, err := dns.ParseDomain(strings.TrimSuffix(host, "."))
if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("parsing host: %w", err)
}
// ../rfc/7671:1015
// First follow CNAMEs for host. If the path to the final name is secure, we must
// look up TLSA there first, then fall back to the original name. If the final name
// is secure, that's also the SNI server name we must use, with the original name as
// an allowed host during certificate name checks (for all TLSA usages other than
// DANE-EE).
cnameDom := hostDom
cnameAuthentic := true
for i := 0; ; i += 1 {
if i == 10 {
return nil, adns.TLSA{}, fmt.Errorf("too many cname lookups")
}
cname, cnameResult, err := resolver.LookupCNAME(ctx, cnameDom.ASCII+".")
cnameAuthentic = cnameAuthentic && cnameResult.Authentic
if !cnameResult.Authentic && i == 0 {
return nil, adns.TLSA{}, fmt.Errorf("%w: cname lookup insecure", ErrInsecure)
} else if dns.IsNotFound(err) {
break
} else if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("resolving cname %s: %w", cnameDom, err)
} else if d, err := dns.ParseDomain(strings.TrimSuffix(cname, ".")); err != nil {
return nil, adns.TLSA{}, fmt.Errorf("parsing cname: %w", err)
} else {
cnameDom = d
}
}
// We look up the IPs.
ipnetwork := "ip"
if strings.HasSuffix(network, "4") {
ipnetwork += "4"
} else if strings.HasSuffix(network, "6") {
ipnetwork += "6"
}
ips, _, err := resolver.LookupIP(ctx, ipnetwork, cnameDom.ASCII+".")
// note: For SMTP with opportunistic DANE we would stop here with an insecure
// response. But as long as we have a verified original tlsa base name, we
// can continue with regular DANE.
if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("resolving ips: %w", err)
} else if len(ips) == 0 {
return nil, adns.TLSA{}, &adns.DNSError{Err: "no ips for host", Name: cnameDom.ASCII, IsNotFound: true}
}
// Look up TLSA records. If resolving the CNAME was secure, we try there first. Otherwise
// we try at the secure original domain.
baseDom := hostDom
if cnameAuthentic {
baseDom = cnameDom
}
var records []adns.TLSA
var result adns.Result
for {
var err error
records, result, err = resolver.LookupTLSA(ctx, port, network, baseDom.ASCII+".")
// If no (secure) records can be found at the final cname, and there is an original
// name, try at original name.
// ../rfc/7671:1015
if baseDom != hostDom && (dns.IsNotFound(err) || !result.Authentic) {
baseDom = hostDom
continue
}
if !result.Authentic {
return nil, adns.TLSA{}, ErrInsecure
} else if dns.IsNotFound(err) {
return nil, adns.TLSA{}, ErrNoRecords
} else if err != nil {
return nil, adns.TLSA{}, fmt.Errorf("lookup dane tlsa records: %w", err)
}
break
}
// Keep only the allowed usages.
if allowedUsages != nil {
o := 0
for _, r := range records {
if slices.Contains(allowedUsages, r.Usage) {
records[o] = r
o++
}
}
records = records[:o]
if len(records) == 0 {
// No point in dialing when we know we won't be able to verify the remote TLS
// certificate.
return nil, adns.TLSA{}, fmt.Errorf("no usable tlsa records remaining: %w", ErrNoMatch)
}
}
// We use the base domain for SNI, allowing the original domain as well.
// ../rfc/7671:1021
var moreAllowedHosts []dns.Domain
if baseDom != hostDom {
moreAllowedHosts = []dns.Domain{hostDom}
}
// Dial the remote host.
timeout := 30 * time.Second
if deadline, ok := ctx.Deadline(); ok && len(ips) > 0 {
timeout = time.Until(deadline) / time.Duration(len(ips))
}
dialer := &net.Dialer{Timeout: timeout}
var conn net.Conn
var dialErrs []error
for _, ip := range ips {
addr := net.JoinHostPort(ip.String(), portstr)
c, err := dialer.DialContext(ctx, network, addr)
if err != nil {
dialErrs = append(dialErrs, err)
continue
}
conn = c
break
}
if conn == nil {
return nil, adns.TLSA{}, errors.Join(dialErrs...)
}
var verifiedRecord adns.TLSA
config := TLSClientConfig(log.Logger, records, baseDom, moreAllowedHosts, &verifiedRecord, pkixRoots)
tlsConn := tls.Client(conn, &config)
if err := tlsConn.HandshakeContext(ctx); err != nil {
xerr := conn.Close()
log.Check(xerr, "closing connection")
return nil, adns.TLSA{}, err
}
return tlsConn, verifiedRecord, nil
}
// TLSClientConfig returns a tls.Config to be used for dialing/handshaking a
// TLS connection with DANE verification.
//
// Callers should only pass records that are allowed for the intended use. DANE
// with SMTP only allows DANE-EE and DANE-TA usages, not the PKIX-usages.
//
// The config has InsecureSkipVerify set to true, with a custom VerifyConnection
// function for verifying DANE. Its VerifyConnection can return ErrNoMatch and
// additionally one or more (wrapped) errors of type VerifyError.
//
// The TLS config uses allowedHost for SNI.
//
// If verifiedRecord is not nil, it is set to the record that was successfully
// verified, if any.
func TLSClientConfig(elog *slog.Logger, records []adns.TLSA, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, verifiedRecord *adns.TLSA, pkixRoots *x509.CertPool) tls.Config {
log := mlog.New("dane", elog)
return tls.Config{
ServerName: allowedHost.ASCII, // For SNI.
InsecureSkipVerify: true,
VerifyConnection: func(cs tls.ConnectionState) error {
verified, record, err := Verify(log.Logger, records, cs, allowedHost, moreAllowedHosts, pkixRoots)
log.Debugx("dane verification", err, slog.Bool("verified", verified), slog.Any("record", record))
if verified {
if verifiedRecord != nil {
*verifiedRecord = record
}
return nil
} else if err == nil {
return ErrNoMatch
}
return fmt.Errorf("%w, and error(s) encountered during verification: %w", ErrNoMatch, err)
},
MinVersion: tls.VersionTLS12, // ../rfc/8996:31 ../rfc/8997:66
}
}
// Verify checks if the TLS connection state can be verified against DANE TLSA
// records.
//
// allowedHost along with the optional moreAllowedHosts are the host names that are
// allowed during certificate verification (as used by PKIX-TA, PKIX-EE, DANE-TA,
// but not DANE-EE). A typical connection would allow just one name, but some uses
// of DANE allow multiple, like SMTP, which allows up to four valid names for a TLS
// certificate based on MX/CNAME/TLSA/DNSSEC lookup results.
//
// When one of the records matches, Verify returns true, along with the matching
// record and a nil error.
// If there is no match, then in the typical case Verify returns: false, a zero
// record value and a nil error.
// If an error is encountered while verifying a record, e.g. for x509
// trusted-anchor verification, an error may be returned, typically one or more
// (wrapped) errors of type VerifyError.
//
// Verify is useful when DANE verification and its results have to be handled
// separately from other validation, e.g. for MTA-STS. The caller can create a
// tls.Config with a VerifyConnection function that checks DANE and MTA-STS
// separately.
func Verify(elog *slog.Logger, records []adns.TLSA, cs tls.ConnectionState, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, pkixRoots *x509.CertPool) (verified bool, matching adns.TLSA, rerr error) {
log := mlog.New("dane", elog)
MetricVerify.Inc()
if len(records) == 0 {
MetricVerifyErrors.Inc()
return false, adns.TLSA{}, fmt.Errorf("verify requires at least one tlsa record")
}
var errs []error
for _, r := range records {
ok, err := verifySingle(log, r, cs, allowedHost, moreAllowedHosts, pkixRoots)
if err != nil {
errs = append(errs, VerifyError{err, r})
} else if ok {
return true, r, nil
}
}
MetricVerifyErrors.Inc()
return false, adns.TLSA{}, errors.Join(errs...)
}
// verifySingle verifies the TLS connection against a single DANE TLSA record.
//
// If the remote TLS certificate matches the TLSA record, true is
// returned. Errors may be encountered while verifying, e.g. when checking one
// of the allowed hosts against a TLSA record. A typical non-matching TLSA
// record returns false with a nil error. But in some cases, e.g. when encountering
// errors while verifying certificates against a trust anchor, an error can be
// returned with one or more underlying x509 verification errors. A non-nil error
// is only returned when verified is false.
func verifySingle(log mlog.Log, tlsa adns.TLSA, cs tls.ConnectionState, allowedHost dns.Domain, moreAllowedHosts []dns.Domain, pkixRoots *x509.CertPool) (verified bool, rerr error) {
if len(cs.PeerCertificates) == 0 {
return false, fmt.Errorf("no server certificate")
}
match := func(cert *x509.Certificate) bool {
var buf []byte
switch tlsa.Selector {
case adns.TLSASelectorCert:
buf = cert.Raw
case adns.TLSASelectorSPKI:
buf = cert.RawSubjectPublicKeyInfo
default:
return false
}
switch tlsa.MatchType {
case adns.TLSAMatchTypeFull:
case adns.TLSAMatchTypeSHA256:
d := sha256.Sum256(buf)
buf = d[:]
case adns.TLSAMatchTypeSHA512:
d := sha512.Sum512(buf)
buf = d[:]
default:
return false
}
return bytes.Equal(buf, tlsa.CertAssoc)
}
pkixVerify := func(host dns.Domain) ([][]*x509.Certificate, error) {
// Default Verify checks for expiration. We pass the host name to check, and we
// configure the intermediates. The roots are the pkixRoots passed to us.
opts := x509.VerifyOptions{
DNSName: host.ASCII,
Intermediates: x509.NewCertPool(),
Roots: pkixRoots,
}
for _, cert := range cs.PeerCertificates[1:] {
opts.Intermediates.AddCert(cert)
}
chains, err := cs.PeerCertificates[0].Verify(opts)
return chains, err
}
switch tlsa.Usage {
case adns.TLSAUsagePKIXTA:
// We cannot get at the system trusted ca certificates to look for the trusted
// anchor. So we just ask Go to verify, then see if any of the chains include the
// ca certificate.
var errs []error
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
chains, err := pkixVerify(host)
log.Debugx("pkix-ta verify", err)
if err != nil {
errs = append(errs, err)
continue
}
// The chains returned by x509's Verify should include the longest possible match,
// so they are sure to include the trusted anchor. ../rfc/7671:835
for _, chain := range chains {
// If pkix verified, check if any of the certificates match.
for i := len(chain) - 1; i >= 0; i-- {
if match(chain[i]) {
return true, nil
}
}
}
}
return false, errors.Join(errs...)
case adns.TLSAUsagePKIXEE:
// Check for a certificate match.
if !match(cs.PeerCertificates[0]) {
return false, nil
}
// And do regular pkix checks, ../rfc/7671:799
var errs []error
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
_, err := pkixVerify(host)
log.Debugx("pkix-ee verify", err)
if err == nil {
return true, nil
}
errs = append(errs, err)
}
return false, errors.Join(errs...)
case adns.TLSAUsageDANETA:
// We set roots, so the system defaults don't get used. Verify checks the host name
// (set below) and checks for expiration.
opts := x509.VerifyOptions{
Intermediates: x509.NewCertPool(),
Roots: x509.NewCertPool(),
}
// If the full certificate was included, we must add it to the valid roots; the TLS
// server may not send it. ../rfc/7671:692
var found bool
if tlsa.Selector == adns.TLSASelectorCert && tlsa.MatchType == adns.TLSAMatchTypeFull {
cert, err := x509.ParseCertificate(tlsa.CertAssoc)
if err != nil {
log.Debugx("parsing full exact certificate from tlsa record to use as root for usage dane-trusted-anchor", err)
// Continue anyway, perhaps the server sends it again in a way that the tls package can parse? (unlikely)
} else {
opts.Roots.AddCert(cert)
found = true
}
}
for i, cert := range cs.PeerCertificates {
if match(cert) {
opts.Roots.AddCert(cert)
found = true
break
} else if i > 0 {
opts.Intermediates.AddCert(cert)
}
}
if !found {
// Trusted anchor was not found in TLS certificates so we won't be able to
// verify.
return false, nil
}
// Trusted anchor was found, still need to verify.
var errs []error
for _, host := range append([]dns.Domain{allowedHost}, moreAllowedHosts...) {
opts.DNSName = host.ASCII
_, err := cs.PeerCertificates[0].Verify(opts)
if err == nil {
return true, nil
}
errs = append(errs, err)
}
return false, errors.Join(errs...)
case adns.TLSAUsageDANEEE:
// ../rfc/7250 is about raw public keys instead of x.509 certificates in tls
// handshakes. Go's crypto/tls does not implement the extension (see
// crypto/tls/common.go; the extension values don't appear in the
// rfc, but have values 19 and 20 according to
// https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#tls-extensiontype-values-1).
// ../rfc/7671:1148 mentions that raw public keys are allowed. It's
// questionable whether this is commonly implemented. For now the world can probably
// live with an ignored certificate wrapped around the subject public key info.
// We don't verify host name in certificate, ../rfc/7671:489
// And we don't check for expiration. ../rfc/7671:527
// The whole point of this type is to have simple secure infrastructure that
// doesn't automatically expire (at the most inconvenient times).
return match(cs.PeerCertificates[0]), nil
default:
// Unknown, perhaps defined in the future. Not an error.
log.Debug("unrecognized tlsa usage, skipping", slog.Any("tlsausage", tlsa.Usage))
return false, nil
}
}
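// Illustration, not part of the original file: a sketch of calling Verify from a
// custom VerifyConnection, e.g. when DANE must be combined with another policy
// check such as MTA-STS. The extraCheck callback is hypothetical.
func exampleDANEPlusPolicyConfig(records []adns.TLSA, baseDom dns.Domain, pkixRoots *x509.CertPool, extraCheck func(tls.ConnectionState) error) tls.Config {
	return tls.Config{
		ServerName:         baseDom.ASCII, // TLSA base domain as SNI.
		InsecureSkipVerify: true,          // Verification happens in VerifyConnection below.
		VerifyConnection: func(cs tls.ConnectionState) error {
			verified, _, err := Verify(slog.Default(), records, cs, baseDom, nil, pkixRoots)
			if !verified {
				if err != nil {
					return fmt.Errorf("%w, and error(s) encountered during verification: %w", ErrNoMatch, err)
				}
				return ErrNoMatch
			}
			// DANE verified, now apply the additional policy check.
			return extraCheck(cs)
		},
		MinVersion: tls.VersionTLS12,
	}
}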

View File

@ -1,476 +0,0 @@
package dane
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
cryptorand "crypto/rand"
"crypto/sha256"
"crypto/sha512"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"fmt"
"math/big"
"net"
"reflect"
"strconv"
"sync/atomic"
"testing"
"time"
"github.com/mjl-/adns"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
)
func tcheckf(t *testing.T, err error, format string, args ...any) {
t.Helper()
if err != nil {
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
}
}
// Test dialing and DANE TLS verification.
func TestDial(t *testing.T) {
log := mlog.New("dane", nil)
// Create fake CA/trusted-anchor certificate.
taTempl := x509.Certificate{
SerialNumber: big.NewInt(1), // Required field.
Subject: pkix.Name{CommonName: "fake ca"},
Issuer: pkix.Name{CommonName: "fake ca"},
NotBefore: time.Now().Add(-1 * time.Hour),
NotAfter: time.Now().Add(1 * time.Hour),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageServerAuth,
x509.ExtKeyUsageClientAuth,
},
BasicConstraintsValid: true,
IsCA: true,
MaxPathLen: 1,
}
taPriv, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
tcheckf(t, err, "generating trusted-anchor ca private key")
taCertBuf, err := x509.CreateCertificate(cryptorand.Reader, &taTempl, &taTempl, taPriv.Public(), taPriv)
tcheckf(t, err, "create trusted-anchor ca certificate")
taCert, err := x509.ParseCertificate(taCertBuf)
tcheckf(t, err, "parsing generated trusted-anchor ca certificate")
tacertsha256 := sha256.Sum256(taCert.Raw)
taCertSHA256 := tacertsha256[:]
// Generate leaf private key & 2 certs, one expired and one valid, both signed by
// trusted-anchor cert.
leafPriv, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader)
tcheckf(t, err, "generating leaf private key")
makeLeaf := func(expired bool) (tls.Certificate, []byte, []byte) {
now := time.Now()
if expired {
now = now.Add(-2 * time.Hour)
}
leafTempl := x509.Certificate{
SerialNumber: big.NewInt(1), // Required field.
Issuer: taTempl.Subject,
NotBefore: now.Add(-1 * time.Hour),
NotAfter: now.Add(1 * time.Hour),
DNSNames: []string{"localhost"},
}
leafCertBuf, err := x509.CreateCertificate(cryptorand.Reader, &leafTempl, taCert, leafPriv.Public(), taPriv)
tcheckf(t, err, "create trusted-anchor ca certificate")
leafCert, err := x509.ParseCertificate(leafCertBuf)
tcheckf(t, err, "parsing generated trusted-anchor ca certificate")
leafSPKISHA256 := sha256.Sum256(leafCert.RawSubjectPublicKeyInfo)
leafSPKISHA512 := sha512.Sum512(leafCert.RawSubjectPublicKeyInfo)
tlsLeafCert := tls.Certificate{
Certificate: [][]byte{leafCertBuf, taCertBuf},
PrivateKey: leafPriv, // .(crypto.PrivateKey),
Leaf: leafCert,
}
return tlsLeafCert, leafSPKISHA256[:], leafSPKISHA512[:]
}
tlsLeafCert, leafSPKISHA256, leafSPKISHA512 := makeLeaf(false)
tlsLeafCertExpired, _, _ := makeLeaf(true)
// Set up loopback tls server.
listenConn, err := net.Listen("tcp", "127.0.0.1:0")
tcheckf(t, err, "listen for test server")
addr := listenConn.Addr().String()
_, portstr, err := net.SplitHostPort(addr)
tcheckf(t, err, "get localhost port")
uport, err := strconv.ParseUint(portstr, 10, 16)
tcheckf(t, err, "parse localhost port")
port := int(uport)
defer listenConn.Close()
// Config for server, replaced during tests.
var tlsConfig atomic.Pointer[tls.Config]
tlsConfig.Store(&tls.Config{
Certificates: []tls.Certificate{tlsLeafCert},
})
// Loop handling incoming TLS connections.
go func() {
for {
conn, err := listenConn.Accept()
if err != nil {
return
}
tlsConn := tls.Server(conn, tlsConfig.Load())
tlsConn.Handshake()
tlsConn.Close()
}
}()
dialHost := "localhost"
var allowedUsages []adns.TLSAUsage
pkixRoots := x509.NewCertPool()
// Helper function for dialing with DANE.
test := func(resolver dns.Resolver, expRecord adns.TLSA, expErr any) {
t.Helper()
conn, record, err := Dial(context.Background(), log.Logger, resolver, "tcp", net.JoinHostPort(dialHost, portstr), allowedUsages, pkixRoots)
if err == nil {
conn.Close()
}
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr.(error)) && !errors.As(err, expErr) {
t.Fatalf("got err %v (%#v), expected %#v", err, err, expErr)
}
if !reflect.DeepEqual(record, expRecord) {
t.Fatalf("got verified record %v, expected %v", record, expRecord)
}
}
tlsaName := fmt.Sprintf("_%d._tcp.localhost.", port)
// Make all kinds of records, some invalid or non-matching.
var zeroRecord adns.TLSA
recordDANEEESPKISHA256 := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: leafSPKISHA256,
}
recordDANEEESPKISHA512 := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA512,
CertAssoc: leafSPKISHA512,
}
recordDANEEESPKIFull := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeFull,
CertAssoc: tlsLeafCert.Leaf.RawSubjectPublicKeyInfo,
}
mismatchRecordDANEEESPKISHA256 := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: make([]byte, sha256.Size), // Zero, no match.
}
malformedRecordDANEEESPKISHA256 := adns.TLSA{
Usage: adns.TLSAUsageDANEEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: leafSPKISHA256[:16], // Too short.
}
unknownparamRecordDANEEESPKISHA256 := adns.TLSA{
Usage: adns.TLSAUsage(10), // Unrecognized value.
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: leafSPKISHA256,
}
recordDANETACertSHA256 := adns.TLSA{
Usage: adns.TLSAUsageDANETA,
Selector: adns.TLSASelectorCert,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: taCertSHA256,
}
recordDANETACertFull := adns.TLSA{
Usage: adns.TLSAUsageDANETA,
Selector: adns.TLSASelectorCert,
MatchType: adns.TLSAMatchTypeFull,
CertAssoc: taCert.Raw,
}
malformedRecordDANETACertFull := adns.TLSA{
Usage: adns.TLSAUsageDANETA,
Selector: adns.TLSASelectorCert,
MatchType: adns.TLSAMatchTypeFull,
CertAssoc: taCert.Raw[1:], // Cannot parse certificate.
}
mismatchRecordDANETACertSHA256 := adns.TLSA{
Usage: adns.TLSAUsageDANETA,
Selector: adns.TLSASelectorCert,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: make([]byte, sha256.Size), // Zero, no match.
}
recordPKIXEESPKISHA256 := adns.TLSA{
Usage: adns.TLSAUsagePKIXEE,
Selector: adns.TLSASelectorSPKI,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: leafSPKISHA256,
}
recordPKIXTACertSHA256 := adns.TLSA{
Usage: adns.TLSAUsagePKIXTA,
Selector: adns.TLSASelectorCert,
MatchType: adns.TLSAMatchTypeSHA256,
CertAssoc: taCertSHA256,
}
resolver := dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKISHA256}},
AllAuthentic: true,
}
// DANE-EE SPKI SHA2-256 record.
test(resolver, recordDANEEESPKISHA256, nil)
// Check that record isn't used if not allowed.
allowedUsages = []adns.TLSAUsage{adns.TLSAUsagePKIXTA}
test(resolver, zeroRecord, ErrNoMatch)
allowedUsages = nil // Restore.
// Mixed allowed/not allowed usages are fine.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {mismatchRecordDANETACertSHA256, recordDANEEESPKISHA256}},
AllAuthentic: true,
}
allowedUsages = []adns.TLSAUsage{adns.TLSAUsageDANEEE}
test(resolver, recordDANEEESPKISHA256, nil)
allowedUsages = nil // Restore.
// DANE-TA CERT SHA2-256 record.
resolver.TLSA = map[string][]adns.TLSA{
tlsaName: {recordDANETACertSHA256},
}
test(resolver, recordDANETACertSHA256, nil)
// No TLSA record.
resolver.TLSA = nil
test(resolver, zeroRecord, ErrNoRecords)
// Insecure TLSA record.
resolver.TLSA = map[string][]adns.TLSA{
tlsaName: {recordDANEEESPKISHA256},
}
resolver.Inauthentic = []string{"tlsa " + tlsaName}
test(resolver, zeroRecord, ErrInsecure)
// Insecure CNAME.
resolver.Inauthentic = []string{"cname localhost."}
test(resolver, zeroRecord, ErrInsecure)
// Insecure TLSA
resolver.Inauthentic = []string{"tlsa " + tlsaName}
test(resolver, zeroRecord, ErrInsecure)
// Insecure CNAME should not look at TLSA records under that name, only under original.
// Initial name/cname is secure. And it has secure TLSA records. But the lookup for
// example1 is not secure, though the final example2 records are.
resolver = dns.MockResolver{
A: map[string][]string{"example2.": {"127.0.0.1"}},
CNAME: map[string]string{"localhost.": "example1.", "example1.": "example2."},
TLSA: map[string][]adns.TLSA{
fmt.Sprintf("_%d._tcp.example2.", port): {mismatchRecordDANETACertSHA256}, // Should be ignored.
tlsaName: {recordDANEEESPKISHA256}, // Should match.
},
AllAuthentic: true,
Inauthentic: []string{"cname example1."},
}
test(resolver, recordDANEEESPKISHA256, nil)
// Matching records after following cname.
resolver = dns.MockResolver{
A: map[string][]string{"example.": {"127.0.0.1"}},
CNAME: map[string]string{"localhost.": "example."},
TLSA: map[string][]adns.TLSA{fmt.Sprintf("_%d._tcp.example.", port): {recordDANETACertSHA256}},
AllAuthentic: true,
}
test(resolver, recordDANETACertSHA256, nil)
// Fall back to the original name for TLSA records if the cname-expanded name doesn't have records.
resolver = dns.MockResolver{
A: map[string][]string{"example.": {"127.0.0.1"}},
CNAME: map[string]string{"localhost.": "example."},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertSHA256}},
AllAuthentic: true,
}
test(resolver, recordDANETACertSHA256, nil)
// Invalid DANE-EE record.
resolver = dns.MockResolver{
A: map[string][]string{
"localhost.": {"127.0.0.1"},
},
TLSA: map[string][]adns.TLSA{
tlsaName: {mismatchRecordDANEEESPKISHA256},
},
AllAuthentic: true,
}
test(resolver, zeroRecord, ErrNoMatch)
// DANE-EE SPKI SHA2-512 record.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKISHA512}},
AllAuthentic: true,
}
test(resolver, recordDANEEESPKISHA512, nil)
// DANE-EE SPKI Full record.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANEEESPKIFull}},
AllAuthentic: true,
}
test(resolver, recordDANEEESPKIFull, nil)
// DANE-TA with full certificate.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertFull}},
AllAuthentic: true,
}
test(resolver, recordDANETACertFull, nil)
// DANE-TA for cert not in TLS handshake.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {mismatchRecordDANETACertSHA256}},
AllAuthentic: true,
}
test(resolver, zeroRecord, ErrNoMatch)
// DANE-TA with leaf cert for other name.
resolver = dns.MockResolver{
A: map[string][]string{"example.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{fmt.Sprintf("_%d._tcp.example.", port): {recordDANETACertSHA256}},
AllAuthentic: true,
}
origDialHost := dialHost
dialHost = "example."
test(resolver, zeroRecord, ErrNoMatch)
dialHost = origDialHost
// DANE-TA with expired cert.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertSHA256}},
AllAuthentic: true,
}
tlsConfig.Store(&tls.Config{
Certificates: []tls.Certificate{tlsLeafCertExpired},
})
test(resolver, zeroRecord, ErrNoMatch)
test(resolver, zeroRecord, &VerifyError{})
test(resolver, zeroRecord, &x509.CertificateInvalidError{})
// Restore.
tlsConfig.Store(&tls.Config{
Certificates: []tls.Certificate{tlsLeafCert},
})
// Malformed TLSA record is unusable, resulting in failure if none left.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANEEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, zeroRecord, ErrNoMatch)
// Malformed TLSA record is unusable and skipped, other verified record causes Dial to succeed.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANEEESPKISHA256, recordDANEEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, recordDANEEESPKISHA256, nil)
// Record with unknown parameters (usage in this case) is unusable, resulting in failure if none left.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {unknownparamRecordDANEEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, zeroRecord, ErrNoMatch)
// An unknown parameter does not prevent another valid record from verifying.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {unknownparamRecordDANEEESPKISHA256, recordDANEEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, recordDANEEESPKISHA256, nil)
// Malformed full TA certificate.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {malformedRecordDANETACertFull}},
AllAuthentic: true,
}
test(resolver, zeroRecord, ErrNoMatch)
// Full TA certificate without getting it from TLS server.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordDANETACertFull}},
AllAuthentic: true,
}
tlsLeafOnlyCert := tlsLeafCert
tlsLeafOnlyCert.Certificate = tlsLeafOnlyCert.Certificate[:1]
tlsConfig.Store(&tls.Config{
Certificates: []tls.Certificate{tlsLeafOnlyCert},
})
test(resolver, recordDANETACertFull, nil)
// Restore.
tlsConfig.Store(&tls.Config{
Certificates: []tls.Certificate{tlsLeafCert},
})
// PKIXEE, will fail due to not being CA-signed.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, zeroRecord, &x509.UnknownAuthorityError{})
// PKIXTA, will fail due to not being CA-signed.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXTACertSHA256}},
AllAuthentic: true,
}
test(resolver, zeroRecord, &x509.UnknownAuthorityError{})
// Now we add the TA to the "pkix" trusted roots and try again.
pkixRoots.AddCert(taCert)
// PKIXEE, will now succeed.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXEESPKISHA256}},
AllAuthentic: true,
}
test(resolver, recordPKIXEESPKISHA256, nil)
// PKIXTA, will now succeed.
resolver = dns.MockResolver{
A: map[string][]string{"localhost.": {"127.0.0.1"}},
TLSA: map[string][]adns.TLSA{tlsaName: {recordPKIXTACertSHA256}},
AllAuthentic: true,
}
test(resolver, recordPKIXTACertSHA256, nil)
}

View File

@ -1,32 +0,0 @@
package dane_test
import (
"context"
"crypto/x509"
"log"
"log/slog"
"github.com/mjl-/adns"
"github.com/mjl-/mox/dane"
"github.com/mjl-/mox/dns"
)
func ExampleDial() {
ctx := context.Background()
resolver := dns.StrictResolver{}
usages := []adns.TLSAUsage{adns.TLSAUsageDANETA, adns.TLSAUsageDANEEE}
pkixRoots, err := x509.SystemCertPool()
if err != nil {
log.Fatalf("system pkix roots: %v", err)
}
// Connect to SMTP server, use STARTTLS, and verify TLS certificate with DANE.
conn, verifiedRecord, err := dane.Dial(ctx, slog.Default(), resolver, "tcp", "mx.example.com:25", usages, pkixRoots)
if err != nil {
log.Fatalf("dial: %v", err)
}
defer conn.Close()
log.Printf("connected, conn %v, verified record %s", conn, verifiedRecord)
}

View File

@ -1,125 +1,5 @@
This file has notes useful for mox developers. This file has notes useful for mox developers.
# Building & testing
For a full build, you'll need a recent Go compiler/toolchain and nodejs/npm for
the frontend. Run "make build" to do a full build. Run "make test" to run the
test suite. With docker installed, you can run "make test-integration" to start
up a few mox instances, a dns server, a postfix instance, and send email
between them.
The mox localserve command is a convenient way to test locally. Most of the
code paths are reachable/testable with mox localserve, but some use cases will
require a full setup.
Before committing, run at least "make fmt" and "make check" (which requires
staticcheck and ineffassign; run "make install-staticcheck install-ineffassign"
once). Also run "make check-shadow" and fix any shadowed variables other than
"err" (those are filtered out, but cause the command to always exit with an
error code; run "make install-shadow" once to install the shadow command). If
you've updated RFC references, run "make" in rfc/; it verifies the referenced
files exist.
When making changes to the public API of a package listed in
apidiff/packages.txt, run "make genapidiff" to update the list of changes in
the upcoming release (run "make install-apidiff" once to install the apidiff
command).
New features may be worth mentioning on the website, see website/ and
instructions below.
# Code style, guidelines, notes
- Keep the same style as existing code.
- For Windows: use package "path/filepath" when dealing with files/directories.
Test code can pass forward-slashed paths directly to standard library functions,
but use proper filepath functions when parameters are passed and in non-test
code. Mailbox names always use forward slash, so use package "path" for mailbox
name/path manipulation. Do not remove/rename files that are still open.
- Not all code uses adns, the DNSSEC-aware resolver. Such as code that makes
http requests, like mtasts and autotls/autocert.
- We don't have an internal/ directory, really just to prevent long paths in
the repo, and to keep all Go code matching *.go */*.go (without matching
vendor/). Part of the packages are reusable by other software. Those reusable
packages must not cause mox implementation details (such as bstore) to get out,
which would cause unexpected dependencies. Those packages also only expose the
standard slog package for logging, not our mlog package. Packages not intended
for reuse do use mlog as it is more convenient. Internally, we always use
mlog.Log to do the logging, wrapping an slog.Logger.
- The code uses panic for error handling in quite a few places, including
smtpserver, imapserver and web API calls. Functions/methods, variables, struct
fields and types that begin with an "x" indicate they can panic on errors. Both
for i/o errors that are fatal for a connection, and also often for user-induced
errors, for example bad IMAP commands or invalid web API requests. These panics
are caught again at the top of a command or top of the connection. Write code
that is panic-safe, using defer to clean up and release resources (a minimal
sketch of this pattern follows after this list).
- Try to check all errors, at the minimum using mlog.Log.Check() to log an error
at the appropriate level. Also when just closing a file. Log messages sometimes
unexpectedly point out latent issues. Only when there is no point in logging,
for example when previous writes to stderr failed, can error logging be skipped.
Test code is less strict about checking errors.
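
The x-prefix panic convention above can be sketched as a small, self-contained example. This is not mox code: xcheckf and command are hypothetical names, chosen only to show the shape of the pattern (panic in helpers, recover once at the top, defer for cleanup).

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// xcheckf is a hypothetical "x" helper: it panics on error instead of
// returning it, keeping command handlers free of error plumbing.
func xcheckf(err error, format string, args ...any) {
	if err != nil {
		panic(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
	}
}

// command runs a single handler. The panic is caught at the top, turned back
// into a returned error, and any deferred cleanup in the handler still runs.
func command(run func()) (rerr error) {
	defer func() {
		if x := recover(); x != nil {
			if err, ok := x.(error); ok {
				rerr = err
				return
			}
			panic(x) // Not an error value: propagate.
		}
	}()
	run()
	return nil
}

func main() {
	err := command(func() {
		xcheckf(errors.New("no such mailbox"), "opening mailbox %q", "Archive")
	})
	log.Printf("command result: %v", err)
}
```

Real code typically recovers only specific error types; this sketch recovers any error for brevity.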
# Reusable packages
Most non-server Go packages are meant to be reusable. This means internal
details are not exposed in the API, and we don't make unneeded changes. We can
still make breaking changes when it improves mox: We don't want to be stuck
with bad API. Third party users aren't affected too seriously due to Go's
minimal version selection. The reusable packages are in apidiff/packages.txt.
We generate the incompatible changes with each release.
# Web interfaces/frontend
The web interface frontends (for webmail/, webadmin/ and webaccount/) are
written in strict TypeScript. The web API is a simple self-documenting
HTTP/JSON RPC API mechanism called sherpa,
https://www.ueber.net/who/mjl/sherpa/. The web API exposes types and functions
as implemented in Go, using https://github.com/mjl-/sherpa. API definitions in
JSON form are generated with https://github.com/mjl-/sherpadoc. Those API
definitions are used to generate TypeScript clients with
https://github.com/mjl-/sherpats/.
The JavaScript that is generated from the TypeScript is included in the
repository. This makes it available for inclusion in the binary, which is
practical for users, and desirable given Go's reproducible builds. When
developing, run "make" to also build the frontend code. Run "make
install-frontend" once to install the TypeScript compiler into ./node_modules/.
There are no other external (runtime or devtime) frontend dependencies. A
light-weight abstraction over the DOM is provided by ./lib.ts. A bit more
manual UI state management must be done compared to "frameworks", but it is
little code, and this allows JavaScript/TypeScript developers to quickly get
started. UI state is often encapsulated in a JavaScript object with a
TypeScript interface exposing a "root" HTMLElement that is added to the DOM,
and functions for accessing/changing the internal state, keeping the UI
manageable.
# Website
The content of the public website at https://www.xmox.nl is in website/, as
markdown files. The website HTML is generated with "make genwebsite", which
writes to website/html/ (files not committed). The FAQ is taken from
README.md, the protocol support table is generated from rfc/index.txt. The
website is kept in this repository so a commit can change both the
implementation and the documentation on the website. Some of the info in
README.md is duplicated on the website, often more elaborate and aimed at a
slightly less technical audience. The website should also mostly be readable
through the markdown in the git repo.
Large files (images/videos) are in https://github.com/mjl-/mox-website-files to
keep the repository reasonably sized.
The public website may serve the content from the "website" branch. After a
release, the main branch (with latest development code and corresponding
changes to the website about new features) is merged into the website branch.
Commits to the website branch (e.g. for a news item, or any other change
unrelated to a new release) are merged back into the main branch.
# TLS certificates # TLS certificates
https://github.com/cloudflare/cfssl is useful for testing with TLS https://github.com/cloudflare/cfssl is useful for testing with TLS
@ -200,7 +80,6 @@ Listeners:
KeyFile: ../../cfssl/wildcard.$domain-key.pem KeyFile: ../../cfssl/wildcard.$domain-key.pem
``` ```
# ACME # ACME
https://github.com/letsencrypt/pebble is useful for testing with ACME. Start a https://github.com/letsencrypt/pebble is useful for testing with ACME. Start a
@ -279,67 +158,25 @@ non-testing purposes. Unfortunately, this also makes it inconvenient to use for
testing purposes. testing purposes.
# Messages for testing
For compatibility and performance testing, it helps to have many messages,
created a long time ago and recently, by different mail user agents. A helpful
source is the Linux kernel mailing list. Archives are available as multiple git
repositories (split due to size) at
https://lore.kernel.org/lkml/_/text/mirror/. The git repos can be converted
to compressed mbox files (about 800MB each) with:
```
# 0 is the first epoch (with over half a million messages), 12 is last
# already-complete epoch at the time of writing (with a quarter million
# messages). The archives are large, converting will take some time.
for i in 0 12; do
git clone --mirror http://lore.kernel.org/lkml/$i lkml-$i.git
(cd lkml-$i.git && time ./tombox.sh | gzip >../lkml-$i.mbox.gz)
done
```
With the following "tombox.sh" script:
```
#!/bin/sh
pre=''
for rev in $(git rev-list --reverse master); do
printf "$pre"
echo "From sender@host $(date '+%a %b %e %H:%M:%S %Y' -d @$(git show -s --format=%ct $rev))"
git show ${rev}:m | sed 's/^>*From />&/'
pre='\n'
done
```
# Release process # Release process
- Gather feedback on recent changes. - Gather feedback on recent changes.
- Check if dependencies need updates. - Check if dependencies need updates.
- Update to latest publicsuffix/ list.
- Check code if there are deprecated features that can be removed. - Check code if there are deprecated features that can be removed.
- Generate apidiff and check if breaking changes can be prevented. Update moxtools. - Update features & roadmap in README.md
- Update features & roadmap in README.md and website. - Write release notes, use instructions from updating.txt.
- Write release notes, copy from previous. - Build and run tests with previous major Go release.
- Build and run tests with previous major Go release, run "make docker-release" to test building images. - Run all (integration) tests, including with race detector.
- Run tests, including with race detector, also with TZ= for UTC-behaviour, and with -count 2. - Test upgrades.
- Run integration and upgrade tests.
- Run fuzzing tests for a while. - Run fuzzing tests for a while.
- Deploy to test environment. Test the update instructions. - Deploy to test environment. Test the update instructions.
- Test mox localserve on various OSes (linux, bsd, macos, windows). - Generate a config with quickstart, check if it results in a working setup.
- Send and receive email through the major webmail providers, check headers. - Send and receive email through the major webmail providers, check headers.
- Send and receive email with imap4/smtp clients. - Send and receive email with imap4/smtp clients.
- Check DNS check admin page. - Check DNS check admin page.
- Check with https://internet.nl. - Check with https://internet.nl
- Move apidiff/next.txt to apidiff/<version>.txt, and create empty next.txt. - Clear updating.txt.
- Add release to the Latest release & News sections of website/index.md. - Create git tag, push code.
- Create git tag (note: "#" is comment, not title/header), push code. - Publish new docker image.
- Build and publish new docker image.
- Deploy update to website.
- Create new release on the github page, so watchers get a notification.
Copy/paste it manually from the tag text, and add link to download/compile
instructions to prevent confusion about "assets" github links to.
- Publish new cross-referenced code/rfc to www.xmox.nl/xr/.
- Update moxtools with latest version.
- Update implementations support matrix.
- Publish signed release notes for updates.xmox.nl and update DNS record. - Publish signed release notes for updates.xmox.nl and update DNS record.
- Create new release on the github page, so watchers get a notification.

View File

@ -21,25 +21,43 @@ import (
"fmt" "fmt"
"hash" "hash"
"io" "io"
"log/slog"
"strings" "strings"
"time" "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/moxio" "github.com/mjl-/mox/moxio"
"github.com/mjl-/mox/publicsuffix" "github.com/mjl-/mox/publicsuffix"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/stub"
"slices"
) )
// If set, signatures for top-level domain "localhost" are accepted. var xlog = mlog.New("dkim")
var Localserve bool
var ( var (
MetricSign stub.CounterVec = stub.CounterVecIgnore{} metricDKIMSign = promauto.NewCounterVec(
MetricVerify stub.HistogramVec = stub.HistogramVecIgnore{} prometheus.CounterOpts{
Name: "mox_dkim_sign_total",
Help: "DKIM messages signings.",
},
[]string{
"key",
},
)
metricDKIMVerify = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "mox_dkim_verify_duration_seconds",
Help: "DKIM verify, including lookup, duration and result.",
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
},
[]string{
"algorithm",
"status",
},
)
) )
var timeNow = time.Now // Replaced during tests. var timeNow = time.Now // Replaced during tests.
@ -95,45 +113,20 @@ var (
// To decide what to do with a message, both the signature parameters and the DNS // To decide what to do with a message, both the signature parameters and the DNS
// TXT record have to be consulted. // TXT record have to be consulted.
type Result struct { type Result struct {
Status Status Status Status
Sig *Sig // Parsed form of DKIM-Signature header. Can be nil for invalid DKIM-Signature header. Sig *Sig // Parsed form of DKIM-Signature header. Can be nil for invalid DKIM-Signature header.
Record *Record // Parsed form of DKIM DNS record for selector and domain in Sig. Optional. Record *Record // Parsed form of DKIM DNS record for selector and domain in Sig. Optional.
RecordAuthentic bool // Whether DKIM DNS record was DNSSEC-protected. Only valid if Sig is non-nil. Err error // If Status is not StatusPass, this error holds the details and can be checked using errors.Is.
Err error // If Status is not StatusPass, this error holds the details and can be checked using errors.Is.
} }
// todo: use some io.Writer to hash the body and the header. // todo: use some io.Writer to hash the body and the header.
// Selector holds selectors and key material to generate DKIM signatures.
type Selector struct {
Hash string // "sha256" or the older "sha1".
HeaderRelaxed bool // If the header is canonicalized in relaxed instead of simple mode.
BodyRelaxed bool // If the body is canonicalized in relaxed instead of simple mode.
Headers []string // Headers to include in signature.
// Whether to "oversign" headers, ensuring additional/new values of existing
// headers cannot be added.
SealHeaders bool
// If > 0, period a signature is valid after signing, as duration, e.g. 72h. The
// period should be enough for delivery at the final destination, potentially with
// several hops/relays. In the order of days at least.
Expiration time.Duration
PrivateKey crypto.Signer // Either an *rsa.PrivateKey or ed25519.PrivateKey.
Domain dns.Domain // Of selector only, not FQDN.
}
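
Given the Selector fields above and the Sign signature just below (the main-branch API), a signing call could look roughly like the sketch here. The ed25519 test key, the selector name "sel2024" and the example.org domain are placeholders, not values from mox.

```go
package main

import (
	"context"
	"crypto/ed25519"
	"fmt"
	"log"
	"log/slog"
	"strings"
	"time"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	// Placeholder key; a real deployment would load the configured private key.
	key := ed25519.NewKeyFromSeed(make([]byte, ed25519.SeedSize))
	sel := dkim.Selector{
		Hash:          "sha256",
		HeaderRelaxed: true,
		BodyRelaxed:   true,
		Headers:       []string{"From", "To", "Subject", "Date", "Message-ID"},
		SealHeaders:   true,
		Expiration:    72 * time.Hour,
		PrivateKey:    key,
		Domain:        dns.Domain{ASCII: "sel2024"},
	}
	msg := strings.NewReader("From: <mjl@example.org>\r\nSubject: hi\r\n\r\ntest\r\n")
	headers, err := dkim.Sign(context.Background(), slog.Default(), "mjl",
		dns.Domain{ASCII: "example.org"}, []dkim.Selector{sel}, false, msg)
	if err != nil {
		log.Fatalf("dkim sign: %v", err)
	}
	fmt.Print(headers) // One DKIM-Signature header per selector.
}
```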
// Sign returns line(s) with DKIM-Signature headers, generated according to the configuration. // Sign returns line(s) with DKIM-Signature headers, generated according to the configuration.
func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, domain dns.Domain, selectors []Selector, smtputf8 bool, msg io.ReaderAt) (headers string, rerr error) { func Sign(ctx context.Context, localpart smtp.Localpart, domain dns.Domain, c config.DKIM, smtputf8 bool, msg io.ReaderAt) (headers string, rerr error) {
log := mlog.New("dkim", elog) log := xlog.WithContext(ctx)
start := timeNow() start := timeNow()
defer func() { defer func() {
log.Debugx("dkim sign result", rerr, log.Debugx("dkim sign result", rerr, mlog.Field("localpart", localpart), mlog.Field("domain", domain), mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start)))
slog.Any("localpart", localpart),
slog.Any("domain", domain),
slog.Bool("smtputf8", smtputf8),
slog.Duration("duration", time.Since(start)))
}() }()
hdrs, bodyOffset, err := parseHeaders(bufio.NewReader(&moxio.AtReader{R: msg})) hdrs, bodyOffset, err := parseHeaders(bufio.NewReader(&moxio.AtReader{R: msg}))
@ -157,25 +150,26 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
var bodyHashes = map[hashKey][]byte{} var bodyHashes = map[hashKey][]byte{}
for _, sel := range selectors { for _, sign := range c.Sign {
sel := c.Selectors[sign]
sig := newSigWithDefaults() sig := newSigWithDefaults()
sig.Version = 1 sig.Version = 1
switch sel.PrivateKey.(type) { switch sel.Key.(type) {
case *rsa.PrivateKey: case *rsa.PrivateKey:
sig.AlgorithmSign = "rsa" sig.AlgorithmSign = "rsa"
MetricSign.IncLabels("rsa") metricDKIMSign.WithLabelValues("rsa").Inc()
case ed25519.PrivateKey: case ed25519.PrivateKey:
sig.AlgorithmSign = "ed25519" sig.AlgorithmSign = "ed25519"
MetricSign.IncLabels("ed25519") metricDKIMSign.WithLabelValues("ed25519").Inc()
default: default:
return "", fmt.Errorf("internal error, unknown pivate key %T", sel.PrivateKey) return "", fmt.Errorf("internal error, unknown pivate key %T", sel.Key)
} }
sig.AlgorithmHash = sel.Hash sig.AlgorithmHash = sel.HashEffective
sig.Domain = domain sig.Domain = domain
sig.Selector = sel.Domain sig.Selector = sel.Domain
sig.Identity = &Identity{&localpart, domain} sig.Identity = &Identity{&localpart, domain}
sig.SignedHeaders = slices.Clone(sel.Headers) sig.SignedHeaders = append([]string{}, sel.HeadersEffective...)
if sel.SealHeaders { if !sel.DontSealHeaders {
// ../rfc/6376:2156 // ../rfc/6376:2156
// Each time a header name is added to the signature, the next unused value is // Each time a header name is added to the signature, the next unused value is
// signed (in reverse order as they occur in the message). So we can add each // signed (in reverse order as they occur in the message). So we can add each
@ -185,23 +179,23 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
for _, h := range hdrs { for _, h := range hdrs {
counts[h.lkey]++ counts[h.lkey]++
} }
for _, h := range sel.Headers { for _, h := range sel.HeadersEffective {
for j := counts[strings.ToLower(h)]; j > 0; j-- { for j := counts[strings.ToLower(h)]; j > 0; j-- {
sig.SignedHeaders = append(sig.SignedHeaders, h) sig.SignedHeaders = append(sig.SignedHeaders, h)
} }
} }
} }
sig.SignTime = timeNow().Unix() sig.SignTime = timeNow().Unix()
if sel.Expiration > 0 { if sel.ExpirationSeconds > 0 {
sig.ExpireTime = sig.SignTime + int64(sel.Expiration/time.Second) sig.ExpireTime = sig.SignTime + int64(sel.ExpirationSeconds)
} }
sig.Canonicalization = "simple" sig.Canonicalization = "simple"
if sel.HeaderRelaxed { if sel.Canonicalization.HeaderRelaxed {
sig.Canonicalization = "relaxed" sig.Canonicalization = "relaxed"
} }
sig.Canonicalization += "/" sig.Canonicalization += "/"
if sel.BodyRelaxed { if sel.Canonicalization.BodyRelaxed {
sig.Canonicalization += "relaxed" sig.Canonicalization += "relaxed"
} else { } else {
sig.Canonicalization += "simple" sig.Canonicalization += "simple"
@ -218,12 +212,12 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
// DKIM-Signature header. // DKIM-Signature header.
// ../rfc/6376:1700 // ../rfc/6376:1700
hk := hashKey{!sel.BodyRelaxed, strings.ToLower(sig.AlgorithmHash)} hk := hashKey{!sel.Canonicalization.BodyRelaxed, strings.ToLower(sig.AlgorithmHash)}
if bh, ok := bodyHashes[hk]; ok { if bh, ok := bodyHashes[hk]; ok {
sig.BodyHash = bh sig.BodyHash = bh
} else { } else {
br := bufio.NewReader(&moxio.AtReader{R: msg, Offset: int64(bodyOffset)}) br := bufio.NewReader(&moxio.AtReader{R: msg, Offset: int64(bodyOffset)})
bh, err = bodyHash(h.New(), !sel.BodyRelaxed, br) bh, err = bodyHash(h.New(), !sel.Canonicalization.BodyRelaxed, br)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -237,12 +231,12 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
} }
verifySig := []byte(strings.TrimSuffix(sigh, "\r\n")) verifySig := []byte(strings.TrimSuffix(sigh, "\r\n"))
dh, err := dataHash(h.New(), !sel.HeaderRelaxed, sig, hdrs, verifySig) dh, err := dataHash(h.New(), !sel.Canonicalization.HeaderRelaxed, sig, hdrs, verifySig)
if err != nil { if err != nil {
return "", err return "", err
} }
switch key := sel.PrivateKey.(type) { switch key := sel.Key.(type) {
case *rsa.PrivateKey: case *rsa.PrivateKey:
sig.Signature, err = key.Sign(cryptorand.Reader, dh, h) sig.Signature, err = key.Sign(cryptorand.Reader, dh, h)
if err != nil { if err != nil {
@ -273,29 +267,22 @@ func Sign(ctx context.Context, elog *slog.Logger, localpart smtp.Localpart, doma
// //
// A requested record is <selector>._domainkey.<domain>. Exactly one valid DKIM // A requested record is <selector>._domainkey.<domain>. Exactly one valid DKIM
// record should be present. // record should be present.
// func Lookup(ctx context.Context, resolver dns.Resolver, selector, domain dns.Domain) (rstatus Status, rrecord *Record, rtxt string, rerr error) {
// authentic indicates if DNS results were DNSSEC-verified. log := xlog.WithContext(ctx)
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selector, domain dns.Domain) (rstatus Status, rrecord *Record, rtxt string, authentic bool, rerr error) {
log := mlog.New("dkim", elog)
start := timeNow() start := timeNow()
defer func() { defer func() {
log.Debugx("dkim lookup result", rerr, log.Debugx("dkim lookup result", rerr, mlog.Field("selector", selector), mlog.Field("domain", domain), mlog.Field("status", rstatus), mlog.Field("record", rrecord), mlog.Field("duration", time.Since(start)))
slog.Any("selector", selector),
slog.Any("domain", domain),
slog.Any("status", rstatus),
slog.Any("record", rrecord),
slog.Duration("duration", time.Since(start)))
}() }()
name := selector.ASCII + "._domainkey." + domain.ASCII + "." name := selector.ASCII + "._domainkey." + domain.ASCII + "."
records, lookupResult, err := dns.WithPackage(resolver, "dkim").LookupTXT(ctx, name) records, err := dns.WithPackage(resolver, "dkim").LookupTXT(ctx, name)
if dns.IsNotFound(err) { if dns.IsNotFound(err) {
// ../rfc/6376:2608 // ../rfc/6376:2608
// We must return StatusPermerror. We may want to return StatusTemperror because in // We must return StatusPermerror. We may want to return StatusTemperror because in
// practice someone will start using a new key before DNS changes have propagated. // practice someone will start using a new key before DNS changes have propagated.
return StatusPermerror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q", ErrNoRecord, name) return StatusPermerror, nil, "", fmt.Errorf("%w: dns name %q", ErrNoRecord, name)
} else if err != nil { } else if err != nil {
return StatusTemperror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q: %s", ErrDNS, name, err) return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q: %s", ErrDNS, name, err)
} }
// ../rfc/6376:2612 // ../rfc/6376:2612
@ -311,7 +298,7 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selec
var isdkim bool var isdkim bool
r, isdkim, err = ParseRecord(s) r, isdkim, err = ParseRecord(s)
if err != nil && isdkim { if err != nil && isdkim {
return StatusPermerror, nil, txt, lookupResult.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err) return StatusPermerror, nil, txt, fmt.Errorf("%w: %s", ErrSyntax, err)
} else if err != nil { } else if err != nil {
// Hopefully the remote MTA admin discovers the configuration error and fix it for // Hopefully the remote MTA admin discovers the configuration error and fix it for
// an upcoming delivery attempt, in case we rejected with temporary status. // an upcoming delivery attempt, in case we rejected with temporary status.
@ -323,7 +310,7 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selec
// ../rfc/6376:1609 // ../rfc/6376:1609
// ../rfc/6376:2584 // ../rfc/6376:2584
if record != nil { if record != nil {
return StatusTemperror, nil, "", lookupResult.Authentic, fmt.Errorf("%w: dns name %q", ErrMultipleRecords, name) return StatusTemperror, nil, "", fmt.Errorf("%w: dns name %q", ErrMultipleRecords, name)
} }
record = r record = r
txt = s txt = s
@ -331,9 +318,9 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selec
} }
if record == nil { if record == nil {
return status, nil, "", lookupResult.Authentic, err return status, nil, "", err
} }
return StatusNeutral, record, txt, lookupResult.Authentic, nil return StatusNeutral, record, txt, nil
} }
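
A brief sketch of calling Lookup as declared above. The selector and domain are placeholders; the plain dns.StrictResolver{} mirrors the DANE example earlier in this document.

```go
package main

import (
	"context"
	"log"
	"log/slog"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	// Resolves <selector>._domainkey.<domain>, here sel2024._domainkey.example.org.
	status, record, txt, authentic, err := dkim.Lookup(context.Background(), slog.Default(),
		dns.StrictResolver{}, dns.Domain{ASCII: "sel2024"}, dns.Domain{ASCII: "example.org"})
	if err != nil {
		log.Fatalf("dkim lookup: status %v: %v", status, err)
	}
	log.Printf("dkim key record (dnssec %v): %q, parsed: %v", authentic, txt, record)
}
```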
// Verify parses the DKIM-Signature headers in a message and verifies each of them. // Verify parses the DKIM-Signature headers in a message and verifies each of them.
@ -348,8 +335,8 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, selec
// verification failure is treated as actual failure. With ignoreTestMode // verification failure is treated as actual failure. With ignoreTestMode
// false, such verification failures are treated as if there is no signature by // false, such verification failures are treated as if there is no signature by
// returning StatusNone. // returning StatusNone.
func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, smtputf8 bool, policy func(*Sig) error, r io.ReaderAt, ignoreTestMode bool) (results []Result, rerr error) { func Verify(ctx context.Context, resolver dns.Resolver, smtputf8 bool, policy func(*Sig) error, r io.ReaderAt, ignoreTestMode bool) (results []Result, rerr error) {
log := mlog.New("dkim", elog) log := xlog.WithContext(ctx)
start := timeNow() start := timeNow()
defer func() { defer func() {
duration := float64(time.Since(start)) / float64(time.Second) duration := float64(time.Since(start)) / float64(time.Second)
@ -359,19 +346,14 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, smtpu
alg = r.Sig.Algorithm() alg = r.Sig.Algorithm()
} }
status := string(r.Status) status := string(r.Status)
MetricVerify.ObserveLabels(duration, alg, status) metricDKIMVerify.WithLabelValues(alg, status).Observe(duration)
} }
if len(results) == 0 { if len(results) == 0 {
log.Debugx("dkim verify result", rerr, slog.Bool("smtputf8", smtputf8), slog.Duration("duration", time.Since(start))) log.Debugx("dkim verify result", rerr, mlog.Field("smtputf8", smtputf8), mlog.Field("duration", time.Since(start)))
} }
for _, result := range results { for _, result := range results {
log.Debugx("dkim verify result", result.Err, log.Debugx("dkim verify result", result.Err, mlog.Field("smtputf8", smtputf8), mlog.Field("status", result.Status), mlog.Field("sig", result.Sig), mlog.Field("record", result.Record), mlog.Field("duration", time.Since(start)))
slog.Bool("smtputf8", smtputf8),
slog.Any("status", result.Status),
slog.Any("sig", result.Sig),
slog.Any("record", result.Record),
slog.Duration("duration", time.Since(start)))
} }
}() }()
@ -391,33 +373,33 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, smtpu
if err != nil { if err != nil {
// ../rfc/6376:2503 // ../rfc/6376:2503
err := fmt.Errorf("parsing DKIM-Signature header: %w", err) err := fmt.Errorf("parsing DKIM-Signature header: %w", err)
results = append(results, Result{StatusPermerror, nil, nil, false, err}) results = append(results, Result{StatusPermerror, nil, nil, err})
continue continue
} }
h, canonHeaderSimple, canonDataSimple, err := checkSignatureParams(ctx, log, sig) h, canonHeaderSimple, canonDataSimple, err := checkSignatureParams(ctx, sig)
if err != nil { if err != nil {
results = append(results, Result{StatusPermerror, sig, nil, false, err}) results = append(results, Result{StatusPermerror, nil, nil, err})
continue continue
} }
// ../rfc/6376:2560 // ../rfc/6376:2560
if err := policy(sig); err != nil { if err := policy(sig); err != nil {
err := fmt.Errorf("%w: %s", ErrPolicy, err) err := fmt.Errorf("%w: %s", ErrPolicy, err)
results = append(results, Result{StatusPolicy, sig, nil, false, err}) results = append(results, Result{StatusPolicy, nil, nil, err})
continue continue
} }
br := bufio.NewReader(&moxio.AtReader{R: r, Offset: int64(bodyOffset)}) br := bufio.NewReader(&moxio.AtReader{R: r, Offset: int64(bodyOffset)})
status, txt, authentic, err := verifySignature(ctx, log.Logger, resolver, sig, h, canonHeaderSimple, canonDataSimple, hdrs, verifySig, br, ignoreTestMode) status, txt, err := verifySignature(ctx, resolver, sig, h, canonHeaderSimple, canonDataSimple, hdrs, verifySig, br, ignoreTestMode)
results = append(results, Result{status, sig, txt, authentic, err}) results = append(results, Result{status, sig, txt, err})
} }
return results, nil return results, nil
} }
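
A hedged sketch of using Verify as declared above. The msg.eml file name and the accept-everything policy function are placeholders; the message is expected to use CRLF line endings as on the wire.

```go
package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"github.com/mjl-/mox/dkim"
	"github.com/mjl-/mox/dns"
)

func main() {
	f, err := os.Open("msg.eml") // Placeholder path to a raw message.
	if err != nil {
		log.Fatalf("open message: %v", err)
	}
	defer f.Close()

	policy := func(*dkim.Sig) error { return nil } // Accept all signature parameters.
	results, err := dkim.Verify(context.Background(), slog.Default(), dns.StrictResolver{},
		false, policy, f, false)
	if err != nil {
		log.Fatalf("dkim verify: %v", err)
	}
	for _, r := range results {
		log.Printf("dkim result: status %v, dnssec %v, err %v", r.Status, r.RecordAuthentic, r.Err)
	}
}
```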
// check if signature is acceptable. // check if signature is acceptable.
// Only looks at the signature parameters, not at the DNS record. // Only looks at the signature parameters, not at the DNS record.
func checkSignatureParams(ctx context.Context, log mlog.Log, sig *Sig) (hash crypto.Hash, canonHeaderSimple, canonBodySimple bool, rerr error) { func checkSignatureParams(ctx context.Context, sig *Sig) (hash crypto.Hash, canonHeaderSimple, canonBodySimple bool, rerr error) {
// "From" header is required, ../rfc/6376:2122 ../rfc/6376:2546 // "From" header is required, ../rfc/6376:2122 ../rfc/6376:2546
var from bool var from bool
for _, h := range sig.SignedHeaders { for _, h := range sig.SignedHeaders {
@ -446,7 +428,7 @@ func checkSignatureParams(ctx context.Context, log mlog.Log, sig *Sig) (hash cry
if subdom.Unicode != "" { if subdom.Unicode != "" {
subdom.Unicode = "x." + subdom.Unicode subdom.Unicode = "x." + subdom.Unicode
} }
if orgDom := publicsuffix.Lookup(ctx, log.Logger, subdom); subdom.ASCII == orgDom.ASCII && !(Localserve && sig.Domain.ASCII == "localhost") { if orgDom := publicsuffix.Lookup(ctx, subdom); subdom.ASCII == orgDom.ASCII {
return 0, false, false, fmt.Errorf("%w: %s", ErrTLD, sig.Domain) return 0, false, false, fmt.Errorf("%w: %s", ErrTLD, sig.Domain)
} }
@ -495,15 +477,15 @@ func checkSignatureParams(ctx context.Context, log mlog.Log, sig *Sig) (hash cry
} }
// lookup the public key in the DNS and verify the signature. // lookup the public key in the DNS and verify the signature.
func verifySignature(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (Status, *Record, bool, error) { func verifySignature(ctx context.Context, resolver dns.Resolver, sig *Sig, hash crypto.Hash, canonHeaderSimple, canonDataSimple bool, hdrs []header, verifySig []byte, body *bufio.Reader, ignoreTestMode bool) (Status, *Record, error) {
// ../rfc/6376:2604 // ../rfc/6376:2604
status, record, _, authentic, err := Lookup(ctx, elog, resolver, sig.Selector, sig.Domain) status, record, _, err := Lookup(ctx, resolver, sig.Selector, sig.Domain)
if err != nil { if err != nil {
// todo: for temporary errors, we could pass on information so caller returns a 4.7.5 ecode, ../rfc/6376:2777 // todo: for temporary errors, we could pass on information so caller returns a 4.7.5 ecode, ../rfc/6376:2777
return status, nil, authentic, err return status, nil, err
} }
status, err = verifySignatureRecord(record, sig, hash, canonHeaderSimple, canonDataSimple, hdrs, verifySig, body, ignoreTestMode) status, err = verifySignatureRecord(record, sig, hash, canonHeaderSimple, canonDataSimple, hdrs, verifySig, body, ignoreTestMode)
return status, record, authentic, err return status, record, err
} }
// verify a DKIM signature given the record from dns and signature from the email message. // verify a DKIM signature given the record from dns and signature from the email message.
@ -549,7 +531,7 @@ func verifySignatureRecord(r *Record, sig *Sig, hash crypto.Hash, canonHeaderSim
if r.PublicKey == nil { if r.PublicKey == nil {
return StatusPermerror, ErrKeyRevoked return StatusPermerror, ErrKeyRevoked
} else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 { } else if rsaKey, ok := r.PublicKey.(*rsa.PublicKey); ok && rsaKey.N.BitLen() < 1024 {
// ../rfc/8301:157 // todo: find a reference that supports this.
return StatusPermerror, ErrWeakKey return StatusPermerror, ErrWeakKey
} }
@ -840,8 +822,8 @@ func parseHeaders(br *bufio.Reader) ([]header, int, error) {
return nil, 0, fmt.Errorf("empty header key") return nil, 0, fmt.Errorf("empty header key")
} }
lkey = strings.ToLower(key) lkey = strings.ToLower(key)
value = slices.Clone(t[1]) value = append([]byte{}, t[1]...)
raw = slices.Clone(line) raw = append([]byte{}, line...)
} }
if key != "" { if key != "" {
l = append(l, header{key, lkey, value, raw}) l = append(l, header{key, lkey, value, raw})

View File

@ -15,12 +15,10 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
) )
var pkglog = mlog.New("dkim", nil)
func policyOK(sig *Sig) error { func policyOK(sig *Sig) error {
return nil return nil
} }
@ -145,7 +143,7 @@ test
}, },
} }
results, err := Verify(context.Background(), pkglog.Logger, resolver, false, policyOK, strings.NewReader(message), false) results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false)
if err != nil { if err != nil {
t.Fatalf("dkim verify: %v", err) t.Fatalf("dkim verify: %v", err)
} }
@ -192,7 +190,7 @@ Joe.
}, },
} }
results, err := Verify(context.Background(), pkglog.Logger, resolver, false, policyOK, strings.NewReader(message), false) results, err := Verify(context.Background(), resolver, false, policyOK, strings.NewReader(message), false)
if err != nil { if err != nil {
t.Fatalf("dkim verify: %v", err) t.Fatalf("dkim verify: %v", err)
} }
@ -221,42 +219,50 @@ test
rsaKey := getRSAKey(t) rsaKey := getRSAKey(t)
ed25519Key := ed25519.NewKeyFromSeed(make([]byte, 32)) ed25519Key := ed25519.NewKeyFromSeed(make([]byte, 32))
selrsa := Selector{ selrsa := config.Selector{
Hash: "sha256", HashEffective: "sha256",
PrivateKey: rsaKey, Key: rsaKey,
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
Domain: dns.Domain{ASCII: "testrsa"}, Domain: dns.Domain{ASCII: "testrsa"},
} }
// Now with sha1 and relaxed canonicalization. // Now with sha1 and relaxed canonicalization.
selrsa2 := Selector{ selrsa2 := config.Selector{
Hash: "sha1", HashEffective: "sha1",
PrivateKey: rsaKey, Key: rsaKey,
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
Domain: dns.Domain{ASCII: "testrsa2"}, Domain: dns.Domain{ASCII: "testrsa2"},
} }
selrsa2.HeaderRelaxed = true selrsa2.Canonicalization.HeaderRelaxed = true
selrsa2.BodyRelaxed = true selrsa2.Canonicalization.BodyRelaxed = true
// Ed25519 key. // Ed25519 key.
seled25519 := Selector{ seled25519 := config.Selector{
Hash: "sha256", HashEffective: "sha256",
PrivateKey: ed25519Key, Key: ed25519Key,
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
Domain: dns.Domain{ASCII: "tested25519"}, Domain: dns.Domain{ASCII: "tested25519"},
} }
// Again ed25519, but without sealing headers. Use sha256 again, for reusing the body hash from the previous dkim-signature. // Again ed25519, but without sealing headers. Use sha256 again, for reusing the body hash from the previous dkim-signature.
seled25519b := Selector{ seled25519b := config.Selector{
Hash: "sha256", HashEffective: "sha256",
PrivateKey: ed25519Key, Key: ed25519Key,
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,Subject,Date", ","), HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,Subject,Date", ","),
SealHeaders: true, DontSealHeaders: true,
Domain: dns.Domain{ASCII: "tested25519b"}, Domain: dns.Domain{ASCII: "tested25519b"},
}
dkimConf := config.DKIM{
Selectors: map[string]config.Selector{
"testrsa": selrsa,
"testrsa2": selrsa2,
"tested25519": seled25519,
"tested25519b": seled25519b,
},
Sign: []string{"testrsa", "testrsa2", "tested25519", "tested25519b"},
} }
selectors := []Selector{selrsa, selrsa2, seled25519, seled25519b}
ctx := context.Background() ctx := context.Background()
headers, err := Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(message)) headers, err := Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(message))
if err != nil { if err != nil {
t.Fatalf("sign: %v", err) t.Fatalf("sign: %v", err)
} }
@ -287,7 +293,7 @@ test
nmsg := headers + message nmsg := headers + message
results, err := Verify(ctx, pkglog.Logger, resolver, false, policyOK, strings.NewReader(nmsg), false) results, err := Verify(ctx, resolver, false, policyOK, strings.NewReader(nmsg), false)
if err != nil { if err != nil {
t.Fatalf("verify: %s", err) t.Fatalf("verify: %s", err)
} }
@ -298,31 +304,31 @@ test
//log.Infof("nmsg\n%s", nmsg) //log.Infof("nmsg\n%s", nmsg)
// Multiple From headers. // Multiple From headers.
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("From: <mjl@mox.example>\r\nFrom: <mjl@mox.example>\r\n\r\ntest")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From: <mjl@mox.example>\r\nFrom: <mjl@mox.example>\r\n\r\ntest"))
if !errors.Is(err, ErrFrom) { if !errors.Is(err, ErrFrom) {
t.Fatalf("sign, got err %v, expected ErrFrom", err) t.Fatalf("sign, got err %v, expected ErrFrom", err)
} }
// No From header. // No From header.
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("Brom: <mjl@mox.example>\r\n\r\ntest")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Brom: <mjl@mox.example>\r\n\r\ntest"))
if !errors.Is(err, ErrFrom) { if !errors.Is(err, ErrFrom) {
t.Fatalf("sign, got err %v, expected ErrFrom", err) t.Fatalf("sign, got err %v, expected ErrFrom", err)
} }
// Malformed headers. // Malformed headers.
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(":\r\n\r\ntest")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(":\r\n\r\ntest"))
if !errors.Is(err, ErrHeaderMalformed) { if !errors.Is(err, ErrHeaderMalformed) {
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
} }
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader(" From:<mjl@mox.example>\r\n\r\ntest")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader(" From:<mjl@mox.example>\r\n\r\ntest"))
if !errors.Is(err, ErrHeaderMalformed) { if !errors.Is(err, ErrHeaderMalformed) {
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
} }
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("Frøm:<mjl@mox.example>\r\n\r\ntest")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("Frøm:<mjl@mox.example>\r\n\r\ntest"))
if !errors.Is(err, ErrHeaderMalformed) { if !errors.Is(err, ErrHeaderMalformed) {
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
} }
_, err = Sign(ctx, pkglog.Logger, "mjl", dns.Domain{ASCII: "mox.example"}, selectors, false, strings.NewReader("From:<mjl@mox.example>")) _, err = Sign(ctx, "mjl", dns.Domain{ASCII: "mox.example"}, dkimConf, false, strings.NewReader("From:<mjl@mox.example>"))
if !errors.Is(err, ErrHeaderMalformed) { if !errors.Is(err, ErrHeaderMalformed) {
t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err) t.Fatalf("sign, got err %v, expected ErrHeaderMalformed", err)
} }
@ -349,9 +355,9 @@ test
var record *Record var record *Record
var recordTxt string var recordTxt string
var msg string var msg string
var sel config.Selector
var dkimConf config.DKIM
var policy func(*Sig) error var policy func(*Sig) error
var sel Selector
var selectors []Selector
var signed bool var signed bool
var signDomain dns.Domain var signDomain dns.Domain
@ -380,13 +386,18 @@ test
}, },
} }
sel = Selector{ sel = config.Selector{
Hash: "sha256", HashEffective: "sha256",
PrivateKey: key, Key: key,
Headers: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","), HeadersEffective: strings.Split("From,To,Cc,Bcc,Reply-To,References,In-Reply-To,Subject,Date,Message-ID,Content-Type", ","),
Domain: dns.Domain{ASCII: "test"}, Domain: dns.Domain{ASCII: "test"},
}
dkimConf = config.DKIM{
Selectors: map[string]config.Selector{
"test": sel,
},
Sign: []string{"test"},
} }
selectors = []Selector{sel}
msg = message msg = message
signed = false signed = false
@ -397,7 +408,7 @@ test
msg = strings.ReplaceAll(msg, "\n", "\r\n") msg = strings.ReplaceAll(msg, "\n", "\r\n")
headers, err := Sign(context.Background(), pkglog.Logger, "mjl", signDomain, selectors, false, strings.NewReader(msg)) headers, err := Sign(context.Background(), "mjl", signDomain, dkimConf, false, strings.NewReader(msg))
if err != nil { if err != nil {
t.Fatalf("sign: %v", err) t.Fatalf("sign: %v", err)
} }
@ -414,7 +425,7 @@ test
sign() sign()
} }
results, err := Verify(context.Background(), pkglog.Logger, resolver, true, policy, strings.NewReader(msg), false) results, err := Verify(context.Background(), resolver, true, policy, strings.NewReader(msg), false)
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
t.Fatalf("got verify error %v, expected %v", err, expErr) t.Fatalf("got verify error %v, expected %v", err, expErr)
} }
@ -449,8 +460,8 @@ test
}) })
// DNS request is failing temporarily. // DNS request is failing temporarily.
test(nil, StatusTemperror, ErrDNS, func() { test(nil, StatusTemperror, ErrDNS, func() {
resolver.Fail = []string{ resolver.Fail = map[dns.Mockreq]struct{}{
"txt test._domainkey.mox.example.", {Type: "txt", Name: "test._domainkey.mox.example."}: {},
} }
}) })
// Claims to be DKIM through v=, but cannot be parsed. ../rfc/6376:2621 // Claims to be DKIM through v=, but cannot be parsed. ../rfc/6376:2621
@ -501,9 +512,11 @@ test
}) })
// Unknown canonicalization. // Unknown canonicalization.
test(nil, StatusPermerror, ErrCanonicalizationUnknown, func() { test(nil, StatusPermerror, ErrCanonicalizationUnknown, func() {
sel.HeaderRelaxed = true sel.Canonicalization.HeaderRelaxed = true
sel.BodyRelaxed = true sel.Canonicalization.BodyRelaxed = true
selectors = []Selector{sel} dkimConf.Selectors = map[string]config.Selector{
"test": sel,
}
sign() sign()
msg = strings.ReplaceAll(msg, "relaxed/relaxed", "bogus/bogus") msg = strings.ReplaceAll(msg, "relaxed/relaxed", "bogus/bogus")
@ -561,8 +574,10 @@ test
resolver.TXT = map[string][]string{ resolver.TXT = map[string][]string{
"test._domainkey.mox.example.": {txt}, "test._domainkey.mox.example.": {txt},
} }
sel.PrivateKey = key sel.Key = key
selectors = []Selector{sel} dkimConf.Selectors = map[string]config.Selector{
"test": sel,
}
}) })
// Key not allowed for email by DNS record. ../rfc/6376:1541 // Key not allowed for email by DNS record. ../rfc/6376:1541
test(nil, StatusPermerror, ErrKeyNotForEmail, func() { test(nil, StatusPermerror, ErrKeyNotForEmail, func() {
@ -585,14 +600,18 @@ test
// Check that last-occurring header field is used. // Check that last-occurring header field is used.
test(nil, StatusFail, ErrSigVerify, func() { test(nil, StatusFail, ErrSigVerify, func() {
sel.SealHeaders = false sel.DontSealHeaders = true
selectors = []Selector{sel} dkimConf.Selectors = map[string]config.Selector{
"test": sel,
}
sign() sign()
msg = strings.ReplaceAll(msg, "\r\n\r\n", "\r\nsubject: another\r\n\r\n") msg = strings.ReplaceAll(msg, "\r\n\r\n", "\r\nsubject: another\r\n\r\n")
}) })
test(nil, StatusPass, nil, func() { test(nil, StatusPass, nil, func() {
sel.SealHeaders = false sel.DontSealHeaders = true
selectors = []Selector{sel} dkimConf.Selectors = map[string]config.Selector{
"test": sel,
}
sign() sign()
msg = "subject: another\r\n" + msg msg = "subject: another\r\n" + msg
}) })

View File

@ -6,15 +6,11 @@ import (
"strconv" "strconv"
"strings" "strings"
"golang.org/x/text/unicode/norm"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/moxvar"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
) )
// Pedantic enables stricter parsing.
var Pedantic bool
type parseErr string type parseErr string
func (e parseErr) Error() string { func (e parseErr) Error() string {
@ -204,18 +200,18 @@ func (p *parser) xdomainselector(isselector bool) dns.Domain {
// domain names must always be a-labels, ../rfc/6376:1115 ../rfc/6376:1187 ../rfc/6376:1303 // domain names must always be a-labels, ../rfc/6376:1115 ../rfc/6376:1187 ../rfc/6376:1303
// dkim selectors with underscores happen in the wild, accept them when not in // dkim selectors with underscores happen in the wild, accept them when not in
// pedantic mode. ../rfc/6376:581 ../rfc/5321:2303 // pedantic mode. ../rfc/6376:581 ../rfc/5321:2303
return isalphadigit(c) || (i > 0 && (c == '-' || isselector && !Pedantic && c == '_') && p.o+1 < len(p.s)) return isalphadigit(c) || (i > 0 && (c == '-' || isselector && !moxvar.Pedantic && c == '_') && p.o+1 < len(p.s))
} }
s := p.xtakefn1(false, subdomain) s := p.xtakefn1(false, subdomain)
for p.hasPrefix(".") { for p.hasPrefix(".") {
s += p.xtake(".") + p.xtakefn1(false, subdomain) s += p.xtake(".") + p.xtakefn1(false, subdomain)
} }
if isselector {
// Not to be interpreted as IDNA.
return dns.Domain{ASCII: strings.ToLower(s)}
}
d, err := dns.ParseDomain(s) d, err := dns.ParseDomain(s)
if err != nil { if err != nil {
// ParseDomain does not allow underscore, work around it.
if strings.Contains(s, "_") && isselector && !moxvar.Pedantic {
return dns.Domain{ASCII: strings.ToLower(s)}
}
p.xerrorf("parsing domain %q: %s", s, err) p.xerrorf("parsing domain %q: %s", s, err)
} }
return d return d
@ -277,11 +273,11 @@ func (p *parser) xlocalpart() smtp.Localpart {
} }
} }
// In the wild, some services use large localparts for generated (bounce) addresses. // In the wild, some services use large localparts for generated (bounce) addresses.
if Pedantic && len(s) > 64 || len(s) > 128 { if moxvar.Pedantic && len(s) > 64 || len(s) > 128 {
// ../rfc/5321:3486 // ../rfc/5321:3486
p.xerrorf("localpart longer than 64 octets") p.xerrorf("localpart longer than 64 octets")
} }
return smtp.Localpart(norm.NFC.String(s)) return smtp.Localpart(s)
} }
func (p *parser) xquotedString() string { func (p *parser) xquotedString() string {

View File

@ -117,7 +117,7 @@ func (s *Sig) Header() (string, error) {
} else if i == len(s.SignedHeaders)-1 { } else if i == len(s.SignedHeaders)-1 {
v += ";" v += ";"
} }
w.Addf(sep, "%s", v) w.Addf(sep, v)
} }
} }
if len(s.CopiedHeaders) > 0 { if len(s.CopiedHeaders) > 0 {
@ -139,7 +139,7 @@ func (s *Sig) Header() (string, error) {
} else if i == len(s.CopiedHeaders)-1 { } else if i == len(s.CopiedHeaders)-1 {
v += ";" v += ";"
} }
w.Addf(sep, "%s", v) w.Addf(sep, v)
} }
} }
@ -147,7 +147,7 @@ func (s *Sig) Header() (string, error) {
w.Addf(" ", "b=") w.Addf(" ", "b=")
if len(s.Signature) > 0 { if len(s.Signature) > 0 {
w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)), false) w.AddWrap([]byte(base64.StdEncoding.EncodeToString(s.Signature)))
} }
w.Add("\r\n") w.Add("\r\n")
return w.String(), nil return w.String(), nil

View File

@ -91,7 +91,7 @@ func TestSig(t *testing.T) {
BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="), BodyHash: xbase64("LjkN2rUhrS3zKXfH2vNgUzz5ERRJkgP9CURXBX0JP0Q="),
Domain: xdomain("xn--mx-lka.example"), // møx.example Domain: xdomain("xn--mx-lka.example"), // møx.example
SignedHeaders: []string{"from"}, SignedHeaders: []string{"from"},
Selector: dns.Domain{ASCII: "xn--tst-bma"}, Selector: xdomain("xn--tst-bma"), // tést
Identity: &Identity{&ulp, xdomain("xn--tst-bma.xn--mx-lka.example")}, // tést.møx.example Identity: &Identity{&ulp, xdomain("xn--tst-bma.xn--mx-lka.example")}, // tést.møx.example
Canonicalization: "simple/simple", Canonicalization: "simple/simple",
Length: -1, Length: -1,

View File

@ -32,7 +32,7 @@ func TestParseRecord(t *testing.T) {
} }
if r != nil { if r != nil {
pk := r.Pubkey pk := r.Pubkey
for range 2 { for i := 0; i < 2; i++ {
ntxt, err := r.Record() ntxt, err := r.Record()
if err != nil { if err != nil {
t.Fatalf("making record: %v", err) t.Fatalf("making record: %v", err)

View File

@ -14,20 +14,34 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog" mathrand "math/rand"
mathrand2 "math/rand/v2"
"time" "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/mjl-/mox/dkim" "github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/publicsuffix" "github.com/mjl-/mox/publicsuffix"
"github.com/mjl-/mox/spf" "github.com/mjl-/mox/spf"
"github.com/mjl-/mox/stub"
) )
var xlog = mlog.New("dmarc")
var ( var (
MetricVerify stub.HistogramVec = stub.HistogramVecIgnore{} metricDMARCVerify = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "mox_dmarc_verify_duration_seconds",
Help: "DMARC verify, including lookup, duration and result.",
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
},
[]string{
"status",
"reject", // yes/no
"use", // yes/no, if policy is used after random selection
},
)
) )
// link errata: // link errata:
@ -57,21 +71,16 @@ const (
// Result is a DMARC policy evaluation. // Result is a DMARC policy evaluation.
type Result struct { type Result struct {
// Whether to reject the message based on policies. If false, the message should // Whether to reject the message based on policies. If false, the message should
// not necessarily be accepted: other checks such as reputation-based and // not necessarily be accepted, e.g. due to reputation or content-based analysis.
// content-based analysis may lead to reject the message.
Reject bool Reject bool
// Result of DMARC validation. A message can fail validation, but still // Result of DMARC validation. A message can fail validation, but still
// not be rejected, e.g. if the policy is "none". // not be rejected, e.g. if the policy is "none".
Status Status Status Status
AlignedSPFPass bool
AlignedDKIMPass bool
// Domain with the DMARC DNS record. May be the organizational domain instead of // Domain with the DMARC DNS record. May be the organizational domain instead of
// the domain in the From-header. // the domain in the From-header.
Domain dns.Domain Domain dns.Domain
// Parsed DMARC record. // Parsed DMARC record.
Record *Record Record *Record
// Whether DMARC DNS response was DNSSEC-signed, regardless of whether SPF/DKIM records were DNSSEC-signed.
RecordAuthentic bool
// Details about possible error condition, e.g. when parsing the DMARC record failed. // Details about possible error condition, e.g. when parsing the DMARC record failed.
Err error Err error
} }
@ -84,45 +93,36 @@ type Result struct {
// domain is determined using the public suffix list. E.g. for // domain is determined using the public suffix list. E.g. for
// "sub.example.com", the organizational domain is "example.com". The returned // "sub.example.com", the organizational domain is "example.com". The returned
// domain is the domain with the DMARC record. // domain is the domain with the DMARC record.
// func Lookup(ctx context.Context, resolver dns.Resolver, from dns.Domain) (status Status, domain dns.Domain, record *Record, txt string, rerr error) {
// rauthentic indicates if the DNS results were DNSSEC-verified. log := xlog.WithContext(ctx)
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFrom dns.Domain) (status Status, domain dns.Domain, record *Record, txt string, rauthentic bool, rerr error) {
log := mlog.New("dmarc", elog)
start := time.Now() start := time.Now()
defer func() { defer func() {
log.Debugx("dmarc lookup result", rerr, log.Debugx("dmarc lookup result", rerr, mlog.Field("fromdomain", from), mlog.Field("status", status), mlog.Field("domain", domain), mlog.Field("record", record), mlog.Field("duration", time.Since(start)))
slog.Any("fromdomain", msgFrom),
slog.Any("status", status),
slog.Any("domain", domain),
slog.Any("record", record),
slog.Duration("duration", time.Since(start)))
}() }()
// ../rfc/7489:859 ../rfc/7489:1370 // ../rfc/7489:859 ../rfc/7489:1370
domain = msgFrom domain = from
status, record, txt, authentic, err := lookupRecord(ctx, resolver, domain) status, record, txt, err := lookupRecord(ctx, resolver, domain)
if status != StatusNone { if status != StatusNone {
return status, domain, record, txt, authentic, err return status, domain, record, txt, err
} }
if record == nil { if record == nil {
// ../rfc/7489:761 ../rfc/7489:1377 // ../rfc/7489:761 ../rfc/7489:1377
domain = publicsuffix.Lookup(ctx, log.Logger, msgFrom) domain = publicsuffix.Lookup(ctx, from)
if domain == msgFrom { if domain == from {
return StatusNone, domain, nil, txt, authentic, err return StatusNone, domain, nil, txt, err
} }
var xauth bool status, record, txt, err = lookupRecord(ctx, resolver, domain)
status, record, txt, xauth, err = lookupRecord(ctx, resolver, domain)
authentic = authentic && xauth
} }
return status, domain, record, txt, authentic, err return status, domain, record, txt, err
} }
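
A minimal sketch of calling the main-branch Lookup above. The sub.example.org domain is a placeholder, chosen to show the fallback to the organizational domain via the public suffix list.

```go
package main

import (
	"context"
	"log"
	"log/slog"

	"github.com/mjl-/mox/dmarc"
	"github.com/mjl-/mox/dns"
)

func main() {
	msgFrom := dns.Domain{ASCII: "sub.example.org"} // Domain from the message From header.
	status, domain, record, txt, authentic, err := dmarc.Lookup(context.Background(),
		slog.Default(), dns.StrictResolver{}, msgFrom)
	if err != nil {
		log.Printf("dmarc lookup: status %v: %v", status, err)
		return
	}
	// domain may be example.org if sub.example.org has no _dmarc record of its own.
	log.Printf("dmarc policy at %v (dnssec %v): %q, parsed: %v", domain, authentic, txt, record)
}
```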
func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (Status, *Record, string, bool, error) { func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain) (Status, *Record, string, error) {
name := "_dmarc." + domain.ASCII + "." name := "_dmarc." + domain.ASCII + "."
txts, result, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name) txts, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name)
if err != nil && !dns.IsNotFound(err) { if err != nil && !dns.IsNotFound(err) {
return StatusTemperror, nil, "", result.Authentic, fmt.Errorf("%w: %s", ErrDNS, err) return StatusTemperror, nil, "", fmt.Errorf("%w: %s", ErrDNS, err)
} }
var record *Record var record *Record
var text string var text string
@ -133,82 +133,17 @@ func lookupRecord(ctx context.Context, resolver dns.Resolver, domain dns.Domain)
// ../rfc/7489:1374 // ../rfc/7489:1374
continue continue
} else if err != nil { } else if err != nil {
return StatusPermerror, nil, text, result.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err) return StatusPermerror, nil, text, fmt.Errorf("%w: %s", ErrSyntax, err)
} }
if record != nil { if record != nil {
// ../rfc/7489:1388 // ../ ../rfc/7489:1388
return StatusNone, nil, "", result.Authentic, ErrMultipleRecords return StatusNone, nil, "", ErrMultipleRecords
} }
text = txt text = txt
record = r record = r
rerr = nil rerr = nil
} }
return StatusNone, record, text, result.Authentic, rerr return StatusNone, record, text, rerr
}
func lookupReportsRecord(ctx context.Context, resolver dns.Resolver, dmarcDomain, extDestDomain dns.Domain) (Status, []*Record, []string, bool, error) {
// ../rfc/7489:1566
name := dmarcDomain.ASCII + "._report._dmarc." + extDestDomain.ASCII + "."
txts, result, err := dns.WithPackage(resolver, "dmarc").LookupTXT(ctx, name)
if err != nil && !dns.IsNotFound(err) {
return StatusTemperror, nil, nil, result.Authentic, fmt.Errorf("%w: %s", ErrDNS, err)
}
var records []*Record
var texts []string
var rerr error = ErrNoRecord
for _, txt := range txts {
r, isdmarc, err := ParseRecordNoRequired(txt)
// Examples in the RFC use "v=DMARC1", even though it isn't a valid DMARC record.
// Accept the specific example.
// ../rfc/7489-eid5440
if !isdmarc && txt == "v=DMARC1" {
xr := DefaultRecord
r, isdmarc, err = &xr, true, nil
}
if !isdmarc {
// ../rfc/7489:1586
continue
}
texts = append(texts, txt)
records = append(records, r)
if err != nil {
return StatusPermerror, records, texts, result.Authentic, fmt.Errorf("%w: %s", ErrSyntax, err)
}
// Multiple records are allowed for the _report record, unlike for policies. ../rfc/7489:1593
rerr = nil
}
return StatusNone, records, texts, result.Authentic, rerr
}
// LookupExternalReportsAccepted returns whether the extDestDomain has opted in
// to receiving dmarc reports for dmarcDomain (where the dmarc record was found),
// through a "._report._dmarc." DNS TXT DMARC record.
//
// accepts is true if the external domain has opted in.
// If a temporary error occurred, the returned status is StatusTemperror, and a
// later retry may give an authoritative result.
// The returned error is ErrNoRecord if no opt-in DNS record exists, which is
// not a failure condition.
//
// The normally invalid "v=DMARC1" record is accepted since it is used as
// example in RFC 7489.
//
// authentic indicates if the DNS results were DNSSEC-verified.
func LookupExternalReportsAccepted(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, dmarcDomain dns.Domain, extDestDomain dns.Domain) (accepts bool, status Status, records []*Record, txts []string, authentic bool, rerr error) {
log := mlog.New("dmarc", elog)
start := time.Now()
defer func() {
log.Debugx("dmarc externalreports result", rerr,
slog.Bool("accepts", accepts),
slog.Any("dmarcdomain", dmarcDomain),
slog.Any("extdestdomain", extDestDomain),
slog.Any("records", records),
slog.Duration("duration", time.Since(start)))
}()
status, records, txts, authentic, rerr = lookupReportsRecord(ctx, resolver, dmarcDomain, extDestDomain)
accepts = rerr == nil
return accepts, status, records, txts, authentic, rerr
} }
// Verify evaluates the DMARC policy for the domain in the From-header of a // Verify evaluates the DMARC policy for the domain in the From-header of a
@ -222,10 +157,9 @@ func LookupExternalReportsAccepted(ctx context.Context, elog *slog.Logger, resol
// Verify always returns the result of verifying the DMARC policy // Verify always returns the result of verifying the DMARC policy
// against the message (for inclusion in Authentication-Result headers). // against the message (for inclusion in Authentication-Result headers).
// //
// useResult indicates if the result should be applied in a policy decision, // useResult indicates if the result should be applied in a policy decision.
// based on the "pct" field in the DMARC record. func Verify(ctx context.Context, resolver dns.Resolver, from dns.Domain, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, applyRandomPercentage bool) (useResult bool, result Result) {
func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFrom dns.Domain, dkimResults []dkim.Result, spfResult spf.Status, spfIdentity *dns.Domain, applyRandomPercentage bool) (useResult bool, result Result) { log := xlog.WithContext(ctx)
log := mlog.New("dmarc", elog)
start := time.Now() start := time.Now()
defer func() { defer func() {
use := "no" use := "no"
@ -236,33 +170,25 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFr
if result.Reject { if result.Reject {
reject = "yes" reject = "yes"
} }
MetricVerify.ObserveLabels(float64(time.Since(start))/float64(time.Second), string(result.Status), reject, use) metricDMARCVerify.WithLabelValues(string(result.Status), reject, use).Observe(float64(time.Since(start)) / float64(time.Second))
log.Debugx("dmarc verify result", result.Err, log.Debugx("dmarc verify result", result.Err, mlog.Field("fromdomain", from), mlog.Field("dkimresults", dkimResults), mlog.Field("spfresult", spfResult), mlog.Field("status", result.Status), mlog.Field("reject", result.Reject), mlog.Field("use", useResult), mlog.Field("duration", time.Since(start)))
slog.Any("fromdomain", msgFrom),
slog.Any("dkimresults", dkimResults),
slog.Any("spfresult", spfResult),
slog.Any("status", result.Status),
slog.Bool("reject", result.Reject),
slog.Bool("use", useResult),
slog.Duration("duration", time.Since(start)))
}() }()
status, recordDomain, record, _, authentic, err := Lookup(ctx, log.Logger, resolver, msgFrom) status, recordDomain, record, _, err := Lookup(ctx, resolver, from)
if record == nil { if record == nil {
return false, Result{false, status, false, false, recordDomain, record, authentic, err} return false, Result{false, status, recordDomain, record, err}
} }
result.Domain = recordDomain result.Domain = recordDomain
result.Record = record result.Record = record
result.RecordAuthentic = authentic
// Record can request sampling of messages to apply policy. // Record can request sampling of messages to apply policy.
// See ../rfc/7489:1432 // See ../rfc/7489:1432
useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand2.IntN(100) < record.Percentage useResult = !applyRandomPercentage || record.Percentage == 100 || mathrand.Intn(100) < record.Percentage
// We treat "quarantine" and "reject" the same. Thus, we also don't "downgrade" // We reject treat "quarantine" and "reject" the same. Thus, we also don't
// from reject to quarantine if this message was sampled out. // "downgrade" from reject to quarantine if this message was sampled out.
// ../rfc/7489:1446 ../rfc/7489:1024 // ../rfc/7489:1446 ../rfc/7489:1024
if recordDomain != msgFrom && record.SubdomainPolicy != PolicyEmpty { if recordDomain != from && record.SubdomainPolicy != PolicyEmpty {
result.Reject = record.SubdomainPolicy != PolicyNone result.Reject = record.SubdomainPolicy != PolicyNone
} else { } else {
result.Reject = record.Policy != PolicyNone result.Reject = record.Policy != PolicyNone
@ -282,15 +208,17 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFr
if r, ok := pubsuffixes[name]; ok { if r, ok := pubsuffixes[name]; ok {
return r return r
} }
r := publicsuffix.Lookup(ctx, log.Logger, name) r := publicsuffix.Lookup(ctx, name)
pubsuffixes[name] = r pubsuffixes[name] = r
return r return r
} }
// ../rfc/7489:1319 // ../rfc/7489:1319
// ../rfc/7489:544 // ../rfc/7489:544
if spfResult == spf.StatusPass && spfIdentity != nil && (*spfIdentity == msgFrom || result.Record.ASPF == "r" && pubsuffix(msgFrom) == pubsuffix(*spfIdentity)) { if spfResult == spf.StatusPass && spfIdentity != nil && (*spfIdentity == from || result.Record.ASPF == "r" && pubsuffix(from) == pubsuffix(*spfIdentity)) {
result.AlignedSPFPass = true result.Reject = false
result.Status = StatusPass
return
} }
for _, dkimResult := range dkimResults { for _, dkimResult := range dkimResults {
@ -300,16 +228,12 @@ func Verify(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, msgFr
continue continue
} }
// ../rfc/7489:511 // ../rfc/7489:511
if dkimResult.Status == dkim.StatusPass && dkimResult.Sig != nil && (dkimResult.Sig.Domain == msgFrom || result.Record.ADKIM == "r" && pubsuffix(msgFrom) == pubsuffix(dkimResult.Sig.Domain)) { if dkimResult.Status == dkim.StatusPass && dkimResult.Sig != nil && (dkimResult.Sig.Domain == from || result.Record.ADKIM == "r" && pubsuffix(from) == pubsuffix(dkimResult.Sig.Domain)) {
// ../rfc/7489:535 // ../rfc/7489:535
result.AlignedDKIMPass = true result.Reject = false
break result.Status = StatusPass
return
} }
} }
if result.AlignedSPFPass || result.AlignedDKIMPass {
result.Reject = false
result.Status = StatusPass
}
return return
} }
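
The alignment checks above compare either the exact domains (strict) or their organizational domains (relaxed). A small illustrative sketch with made-up domains, assuming publicsuffix.Lookup from github.com/mjl-/mox/publicsuffix returns the organizational domain as a dns.Domain:

package main

import (
	"context"
	"fmt"
	"log/slog"

	"github.com/mjl-/mox/dns"
	"github.com/mjl-/mox/publicsuffix"
)

func main() {
	ctx := context.Background()
	msgFrom, _ := dns.ParseDomain("sub.example.com") // domain from the message From header
	spfID, _ := dns.ParseDomain("mail.example.com")  // SPF-validated MAIL FROM or EHLO domain

	// Relaxed alignment ("aspf=r", the default): organizational domains must match.
	orgFrom := publicsuffix.Lookup(ctx, slog.Default(), msgFrom)
	orgSPF := publicsuffix.Lookup(ctx, slog.Default(), spfID)
	fmt.Println("relaxed aligned:", orgFrom == orgSPF) // true, both are example.com

	// Strict alignment ("aspf=s"): identities must match exactly.
	fmt.Println("strict aligned:", msgFrom == spfID) // false
}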

View File

@ -8,12 +8,9 @@ import (
"github.com/mjl-/mox/dkim" "github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/spf" "github.com/mjl-/mox/spf"
) )
var pkglog = mlog.New("dmarc", nil)
func TestLookup(t *testing.T) { func TestLookup(t *testing.T) {
resolver := dns.MockResolver{ resolver := dns.MockResolver{
TXT: map[string][]string{ TXT: map[string][]string{
@ -24,15 +21,15 @@ func TestLookup(t *testing.T) {
"_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"}, "_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"},
"_dmarc.example.com.": {"v=DMARC1; p=none;"}, "_dmarc.example.com.": {"v=DMARC1; p=none;"},
}, },
Fail: []string{ Fail: map[dns.Mockreq]struct{}{
"txt _dmarc.temperror.example.", {Type: "txt", Name: "_dmarc.temperror.example."}: {},
}, },
} }
test := func(d string, expStatus Status, expDomain string, expRecord *Record, expErr error) { test := func(d string, expStatus Status, expDomain string, expRecord *Record, expErr error) {
t.Helper() t.Helper()
status, dom, record, _, _, err := Lookup(context.Background(), pkglog.Logger, resolver, dns.Domain{ASCII: d}) status, dom, record, _, err := Lookup(context.Background(), resolver, dns.Domain{ASCII: d})
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) { if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
t.Fatalf("got err %#v, expected %#v", err, expErr) t.Fatalf("got err %#v, expected %#v", err, expErr)
} }
@ -53,45 +50,6 @@ func TestLookup(t *testing.T) {
test("sub.example.com", StatusNone, "example.com", &r, nil) // Policy published at organizational domain, public suffix. test("sub.example.com", StatusNone, "example.com", &r, nil) // Policy published at organizational domain, public suffix.
} }
func TestLookupExternalReportsAccepted(t *testing.T) {
resolver := dns.MockResolver{
TXT: map[string][]string{
"example.com._report._dmarc.simple.example.": {"v=DMARC1"},
"example.com._report._dmarc.simple2.example.": {"v=DMARC1;"},
"example.com._report._dmarc.one.example.": {"v=DMARC1; p=none;", "other"},
"example.com._report._dmarc.temperror.example.": {"v=DMARC1; p=none;"},
"example.com._report._dmarc.multiple.example.": {"v=DMARC1; p=none;", "v=DMARC1"},
"example.com._report._dmarc.malformed.example.": {"v=DMARC1; p=none; bogus;"},
},
Fail: []string{
"txt example.com._report._dmarc.temperror.example.",
},
}
test := func(dom, extdom string, expStatus Status, expAccepts bool, expErr error) {
t.Helper()
accepts, status, _, _, _, err := LookupExternalReportsAccepted(context.Background(), pkglog.Logger, resolver, dns.Domain{ASCII: dom}, dns.Domain{ASCII: extdom})
if (err == nil) != (expErr == nil) || err != nil && !errors.Is(err, expErr) {
t.Fatalf("got err %#v, expected %#v", err, expErr)
}
if status != expStatus || accepts != expAccepts {
t.Fatalf("got status %s, accepts %v, expected %v, %v", status, accepts, expStatus, expAccepts)
}
}
r := DefaultRecord
r.Policy = PolicyNone
test("example.com", "simple.example", StatusNone, true, nil)
test("example.org", "simple.example", StatusNone, false, ErrNoRecord)
test("example.com", "simple2.example", StatusNone, true, nil)
test("example.com", "one.example", StatusNone, true, nil)
test("example.com", "absent.example", StatusNone, false, ErrNoRecord)
test("example.com", "multiple.example", StatusNone, true, nil)
test("example.com", "malformed.example", StatusPermerror, false, ErrSyntax)
test("example.com", "temperror.example", StatusTemperror, false, ErrDNS)
}
func TestVerify(t *testing.T) { func TestVerify(t *testing.T) {
resolver := dns.MockResolver{ resolver := dns.MockResolver{
TXT: map[string][]string{ TXT: map[string][]string{
@ -103,8 +61,8 @@ func TestVerify(t *testing.T) {
"_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus"}, "_dmarc.malformed.example.": {"v=DMARC1; p=none; bogus"},
"_dmarc.example.com.": {"v=DMARC1; p=reject"}, "_dmarc.example.com.": {"v=DMARC1; p=reject"},
}, },
Fail: []string{ Fail: map[dns.Mockreq]struct{}{
"txt _dmarc.temperror.example.", {Type: "txt", Name: "_dmarc.temperror.example."}: {},
}, },
} }
@ -127,7 +85,7 @@ func TestVerify(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("parsing domain: %v", err) t.Fatalf("parsing domain: %v", err)
} }
useResult, result := Verify(context.Background(), pkglog.Logger, resolver, from, dkimResults, spfResult, spfIdentity, true) useResult, result := Verify(context.Background(), resolver, from, dkimResults, spfResult, spfIdentity, true)
if useResult != expUseResult || !equalResult(result, expResult) { if useResult != expUseResult || !equalResult(result, expResult) {
t.Fatalf("verify: got useResult %v, result %#v, expected %v %#v", useResult, result, expUseResult, expResult) t.Fatalf("verify: got useResult %v, result %#v, expected %v %#v", useResult, result, expUseResult, expResult)
} }
@ -140,7 +98,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusNone, spf.StatusNone,
nil, nil,
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{true, StatusFail, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// Accept with spf pass. // Accept with spf pass.
@ -148,7 +106,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusPass, spf.StatusPass,
&dns.Domain{ASCII: "sub.reject.example"}, &dns.Domain{ASCII: "sub.reject.example"},
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// Accept with dkim pass. // Accept with dkim pass.
@ -164,7 +122,7 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusFail, spf.StatusFail,
&dns.Domain{ASCII: "reject.example"}, &dns.Domain{ASCII: "reject.example"},
true, Result{false, StatusPass, false, true, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// Reject due to spf and dkim "strict". // Reject due to spf and dkim "strict".
@ -184,7 +142,7 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusPass, spf.StatusPass,
&dns.Domain{ASCII: "sub.strict.example"}, &dns.Domain{ASCII: "sub.strict.example"},
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "strict.example"}, &strict, false, nil}, true, Result{true, StatusFail, dns.Domain{ASCII: "strict.example"}, &strict, nil},
) )
// No dmarc policy, nothing to say. // No dmarc policy, nothing to say.
@ -192,7 +150,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusNone, spf.StatusNone,
nil, nil,
false, Result{false, StatusNone, false, false, dns.Domain{ASCII: "absent.example"}, nil, false, ErrNoRecord}, false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord},
) )
// No dmarc policy, spf pass does nothing. // No dmarc policy, spf pass does nothing.
@ -200,7 +158,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusPass, spf.StatusPass,
&dns.Domain{ASCII: "absent.example"}, &dns.Domain{ASCII: "absent.example"},
false, Result{false, StatusNone, false, false, dns.Domain{ASCII: "absent.example"}, nil, false, ErrNoRecord}, false, Result{false, StatusNone, dns.Domain{ASCII: "absent.example"}, nil, ErrNoRecord},
) )
none := DefaultRecord none := DefaultRecord
@ -210,7 +168,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusPass, spf.StatusPass,
&dns.Domain{ASCII: "none.example"}, &dns.Domain{ASCII: "none.example"},
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "none.example"}, &none, false, nil}, true, Result{false, StatusPass, dns.Domain{ASCII: "none.example"}, &none, nil},
) )
// No actual reject due to pct=0. // No actual reject due to pct=0.
@ -221,7 +179,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusNone, spf.StatusNone,
nil, nil,
false, Result{true, StatusFail, false, false, dns.Domain{ASCII: "test.example"}, &testr, false, nil}, false, Result{true, StatusFail, dns.Domain{ASCII: "test.example"}, &testr, nil},
) )
// No reject if subdomain has "none" policy. // No reject if subdomain has "none" policy.
@ -232,7 +190,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusFail, spf.StatusFail,
&dns.Domain{ASCII: "sub.subnone.example"}, &dns.Domain{ASCII: "sub.subnone.example"},
true, Result{false, StatusFail, false, false, dns.Domain{ASCII: "subnone.example"}, &sub, false, nil}, true, Result{false, StatusFail, dns.Domain{ASCII: "subnone.example"}, &sub, nil},
) )
// No reject if spf temperror and no other pass. // No reject if spf temperror and no other pass.
@ -240,7 +198,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusTemperror, spf.StatusTemperror,
&dns.Domain{ASCII: "mail.reject.example"}, &dns.Domain{ASCII: "mail.reject.example"},
true, Result{false, StatusTemperror, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// No reject if dkim temperror and no other pass. // No reject if dkim temperror and no other pass.
@ -256,7 +214,7 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusNone, spf.StatusNone,
nil, nil,
true, Result{false, StatusTemperror, false, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusTemperror, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// No reject if spf temperror but still dkim pass. // No reject if spf temperror but still dkim pass.
@ -272,7 +230,7 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusTemperror, spf.StatusTemperror,
&dns.Domain{ASCII: "mail.reject.example"}, &dns.Domain{ASCII: "mail.reject.example"},
true, Result{false, StatusPass, false, true, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// No reject if dkim temperror but still spf pass. // No reject if dkim temperror but still spf pass.
@ -288,7 +246,7 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusPass, spf.StatusPass,
&dns.Domain{ASCII: "mail.reject.example"}, &dns.Domain{ASCII: "mail.reject.example"},
true, Result{false, StatusPass, true, false, dns.Domain{ASCII: "reject.example"}, &reject, false, nil}, true, Result{false, StatusPass, dns.Domain{ASCII: "reject.example"}, &reject, nil},
) )
// Bad DMARC record results in permerror without reject. // Bad DMARC record results in permerror without reject.
@ -296,7 +254,7 @@ func TestVerify(t *testing.T) {
[]dkim.Result{}, []dkim.Result{},
spf.StatusNone, spf.StatusNone,
nil, nil,
false, Result{false, StatusPermerror, false, false, dns.Domain{ASCII: "malformed.example"}, nil, false, ErrSyntax}, false, Result{false, StatusPermerror, dns.Domain{ASCII: "malformed.example"}, nil, ErrSyntax},
) )
// DKIM domain that is higher-level than organizational can not result in a pass. ../rfc/7489:525 // DKIM domain that is higher-level than organizational can not result in a pass. ../rfc/7489:525
@ -312,6 +270,6 @@ func TestVerify(t *testing.T) {
}, },
spf.StatusNone, spf.StatusNone,
nil, nil,
true, Result{true, StatusFail, false, false, dns.Domain{ASCII: "example.com"}, &reject, false, nil}, true, Result{true, StatusFail, dns.Domain{ASCII: "example.com"}, &reject, nil},
) )
} }

View File

@ -1,85 +0,0 @@
package dmarc_test
import (
"context"
"log"
"log/slog"
"net"
"strings"
"github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/dmarc"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/message"
"github.com/mjl-/mox/spf"
)
func ExampleLookup() {
ctx := context.Background()
resolver := dns.StrictResolver{}
msgFrom, err := dns.ParseDomain("sub.example.com")
if err != nil {
log.Fatalf("parsing from domain: %v", err)
}
// Lookup DMARC DNS record for domain.
status, domain, record, txt, authentic, err := dmarc.Lookup(ctx, slog.Default(), resolver, msgFrom)
if err != nil {
log.Fatalf("dmarc lookup: %v", err)
}
log.Printf("status %s, domain %s, record %v, txt %q, dnssec %v", status, domain, record, txt, authentic)
}
func ExampleVerify() {
ctx := context.Background()
resolver := dns.StrictResolver{}
// Message to verify.
msg := strings.NewReader("From: <sender@example.com>\r\nMore: headers\r\n\r\nBody\r\n")
msgFrom, _, _, err := message.From(slog.Default(), true, msg, nil)
if err != nil {
log.Fatalf("parsing message for from header: %v", err)
}
// Verify SPF, for use with DMARC.
args := spf.Args{
RemoteIP: net.ParseIP("10.11.12.13"),
MailFromDomain: dns.Domain{ASCII: "sub.example.com"},
}
spfReceived, spfDomain, _, _, err := spf.Verify(ctx, slog.Default(), resolver, args)
if err != nil {
log.Printf("verifying spf: %v", err)
}
// Verify DKIM-Signature headers, for use with DMARC.
smtputf8 := false
ignoreTestMode := false
dkimResults, err := dkim.Verify(ctx, slog.Default(), resolver, smtputf8, dkim.DefaultPolicy, msg, ignoreTestMode)
if err != nil {
log.Printf("verifying dkim: %v", err)
}
// Verify DMARC, based on DKIM and SPF results.
applyRandomPercentage := true
useResult, result := dmarc.Verify(ctx, slog.Default(), resolver, msgFrom.Domain, dkimResults, spfReceived.Result, &spfDomain, applyRandomPercentage)
// Print results.
log.Printf("dmarc status: %s", result.Status)
log.Printf("use result: %v", useResult)
if useResult && result.Reject {
log.Printf("should reject message")
}
log.Printf("result: %#v", result)
}
func ExampleParseRecord() {
txt := "v=DMARC1; p=reject; rua=mailto:postmaster@mox.example"
record, isdmarc, err := dmarc.ParseRecord(txt)
if err != nil {
log.Fatalf("parsing dmarc record: %v (isdmarc: %v)", err, isdmarc)
}
log.Printf("parsed record: %v", record)
}

View File

@ -19,22 +19,7 @@ func (e parseErr) Error() string {
// for easy comparison. // for easy comparison.
// //
// DefaultRecord provides default values for tags not present in s. // DefaultRecord provides default values for tags not present in s.
//
// isdmarc indicates if the record starts with tag "v" and value "DMARC1", and should // isdmarc indicates if the record starts with tag "v" and value "DMARC1", and should
// be treated as a valid DMARC record. Used to detect possibly multiple DMARC // be treated as a valid DMARC record. Used to detect possibly multiple DMARC
// records (invalid) for a domain with multiple TXT records (quite common). // records (invalid) for a domain with multiple TXT records (quite common).
func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) { func ParseRecord(s string) (record *Record, isdmarc bool, rerr error) {
return parseRecord(s, true)
}
// ParseRecordNoRequired is like ParseRecord, but don't check for required fields
// for regular DMARC records. Useful for checking the _report._dmarc record,
// used for opting into receiving reports for other domains.
func ParseRecordNoRequired(s string) (record *Record, isdmarc bool, rerr error) {
return parseRecord(s, false)
}
func parseRecord(s string, checkRequired bool) (record *Record, isdmarc bool, rerr error) {
defer func() { defer func() {
x := recover() x := recover()
if x == nil { if x == nil {
@ -92,9 +77,9 @@ func parseRecord(s string, checkRequired bool) (record *Record, isdmarc bool, re
// ../rfc/7489:1105 // ../rfc/7489:1105
p.xerrorf("p= (policy) must be first tag") p.xerrorf("p= (policy) must be first tag")
} }
r.Policy = Policy(p.xtakelist("none", "quarantine", "reject")) r.Policy = DMARCPolicy(p.xtakelist("none", "quarantine", "reject"))
case "sp": case "sp":
r.SubdomainPolicy = Policy(p.xkeyword()) r.SubdomainPolicy = DMARCPolicy(p.xkeyword())
// note: we check if the value is valid before returning. // note: we check if the value is valid before returning.
case "rua": case "rua":
r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri()) r.AggregateReportAddresses = append(r.AggregateReportAddresses, p.xuri())
@ -149,7 +134,7 @@ func parseRecord(s string, checkRequired bool) (record *Record, isdmarc bool, re
// ../rfc/7489:1106 says "p" is required, but ../rfc/7489:1407 implies we must be // ../rfc/7489:1106 says "p" is required, but ../rfc/7489:1407 implies we must be
// able to parse a record without a "p" or with invalid "sp" tag. // able to parse a record without a "p" or with invalid "sp" tag.
sp := r.SubdomainPolicy sp := r.SubdomainPolicy
if checkRequired && (!seen["p"] || sp != PolicyEmpty && sp != PolicyNone && sp != PolicyQuarantine && sp != PolicyReject) { if !seen["p"] || sp != PolicyEmpty && sp != PolicyNone && sp != PolicyQuarantine && sp != PolicyReject {
if len(r.AggregateReportAddresses) > 0 { if len(r.AggregateReportAddresses) > 0 {
r.Policy = PolicyNone r.Policy = PolicyNone
r.SubdomainPolicy = PolicyEmpty r.SubdomainPolicy = PolicyEmpty

View File

@ -5,23 +5,25 @@ import (
"strings" "strings"
) )
// todo: DMARCPolicy should be named just Policy, but this is causing conflicting types in sherpadoc output. should somehow get the dmarc-prefix only in the sherpadoc.
// Policy as used in DMARC DNS record for "p=" or "sp=". // Policy as used in DMARC DNS record for "p=" or "sp=".
type Policy string type DMARCPolicy string
// ../rfc/7489:1157 // ../rfc/7489:1157
const ( const (
PolicyEmpty Policy = "" // Only for the optional Record.SubdomainPolicy. PolicyEmpty DMARCPolicy = "" // Only for the optional Record.SubdomainPolicy.
PolicyNone Policy = "none" PolicyNone DMARCPolicy = "none"
PolicyQuarantine Policy = "quarantine" PolicyQuarantine DMARCPolicy = "quarantine"
PolicyReject Policy = "reject" PolicyReject DMARCPolicy = "reject"
) )
// URI is a destination address for reporting. // URI is a destination address for reporting.
type URI struct { type URI struct {
Address string // Should start with "mailto:". Address string // Should start with "mailto:".
MaxSize uint64 // Optional maximum message size, subject to Unit. MaxSize uint64 // Optional maximum message size, subject to Unit.
Unit string // "" (b), "k", "m", "g", "t" (case insensitive), unit size, where k is 2^10 etc. Unit string // "" (b), "k", "g", "t" (case insensitive), unit size, where k is 2^10 etc.
} }
// String returns a string representation of the URI for inclusion in a DMARC // String returns a string representation of the URI for inclusion in a DMARC
@ -31,7 +33,7 @@ func (u URI) String() string {
s = strings.ReplaceAll(s, ",", "%2C") s = strings.ReplaceAll(s, ",", "%2C")
s = strings.ReplaceAll(s, "!", "%21") s = strings.ReplaceAll(s, "!", "%21")
if u.MaxSize > 0 { if u.MaxSize > 0 {
s += fmt.Sprintf("!%d", u.MaxSize) s += fmt.Sprintf("%d", u.MaxSize)
} }
s += u.Unit s += u.Unit
return s return s
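
To illustrate (a sketch against the main-branch column, with a made-up address): a reporting URI with a size limit serializes with the size and unit appended after "!", which is what the "!%d" fix above restores.

package main

import (
	"fmt"

	"github.com/mjl-/mox/dmarc"
)

func main() {
	// Accept aggregate reports up to 10 mebibytes at this address.
	u := dmarc.URI{Address: "mailto:dmarc-reports@sender.example", MaxSize: 10, Unit: "m"}
	// Prints roughly: mailto:dmarc-reports@sender.example!10m
	fmt.Println(u.String())
}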
@ -53,17 +55,17 @@ const (
// //
// v=DMARC1; p=reject; rua=mailto:postmaster@mox.example // v=DMARC1; p=reject; rua=mailto:postmaster@mox.example
type Record struct { type Record struct {
Version string // "v=DMARC1", fixed. Version string // "v=DMARC1"
Policy Policy // Required, for "p=". Policy DMARCPolicy // Required, for "p=".
SubdomainPolicy Policy // Like policy but for subdomains. Optional, for "sp=". SubdomainPolicy DMARCPolicy // Like policy but for subdomains. Optional, for "sp=".
AggregateReportAddresses []URI // Optional, for "rua=". Destination addresses for aggregate reports. AggregateReportAddresses []URI // Optional, for "rua=".
FailureReportAddresses []URI // Optional, for "ruf=". Destination addresses for failure reports. FailureReportAddresses []URI // Optional, for "ruf="
ADKIM Align // Alignment: "r" (default) for relaxed or "s" for simple. For "adkim=". ADKIM Align // "r" (default) for relaxed or "s" for simple. For "adkim=".
ASPF Align // Alignment: "r" (default) for relaxed or "s" for simple. For "aspf=". ASPF Align // "r" (default) for relaxed or "s" for simple. For "aspf=".
AggregateReportingInterval int // In seconds, default 86400. For "ri=" AggregateReportingInterval int // Default 86400. For "ri="
FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=". FailureReportingOptions []string // "0" (default), "1", "d", "s". For "fo=".
ReportingFormat []string // "afrf" (default). For "rf=". ReportingFormat []string // "afrf" (default). Ffor "rf=".
Percentage int // Between 0 and 100, default 100. For "pct=". Policy applies randomly to this percentage of messages. Percentage int // Between 0 and 100, default 100. For "pct=".
} }
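
A short sketch (made-up address, main-branch behavior) of building a record from DefaultRecord and rendering it; tags still at their defaults are omitted from the output:

package main

import (
	"fmt"

	"github.com/mjl-/mox/dmarc"
)

func main() {
	r := dmarc.DefaultRecord // v=DMARC1, relaxed alignment, ri=86400, pct=100
	r.Policy = dmarc.PolicyReject
	r.AggregateReportAddresses = []dmarc.URI{{Address: "mailto:dmarc-reports@sender.example"}}

	// Tags still at their defaults (adkim, aspf, ri, fo, rf, pct) are not written,
	// so this prints roughly: v=DMARC1;p=reject;rua=mailto:dmarc-reports@sender.example
	fmt.Println(r.String())
}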
// DefaultRecord holds the defaults for a DMARC record. // DefaultRecord holds the defaults for a DMARC record.
@ -107,13 +109,13 @@ func (r Record) String() string {
s := strings.Join(l, ",") s := strings.Join(l, ",")
write(true, "ruf", s) write(true, "ruf", s)
} }
write(r.ADKIM != "" && r.ADKIM != "r", "adkim", string(r.ADKIM)) write(r.ADKIM != "", "adkim", string(r.ADKIM))
write(r.ASPF != "" && r.ASPF != "r", "aspf", string(r.ASPF)) write(r.ASPF != "", "aspf", string(r.ASPF))
write(r.AggregateReportingInterval != DefaultRecord.AggregateReportingInterval, "ri", fmt.Sprintf("%d", r.AggregateReportingInterval)) write(r.AggregateReportingInterval != DefaultRecord.AggregateReportingInterval, "ri", fmt.Sprintf("%d", r.AggregateReportingInterval))
if len(r.FailureReportingOptions) > 1 || len(r.FailureReportingOptions) == 1 && r.FailureReportingOptions[0] != "0" { if len(r.FailureReportingOptions) > 1 || (len(r.FailureReportingOptions) == 1 && r.FailureReportingOptions[0] != "0") {
write(true, "fo", strings.Join(r.FailureReportingOptions, ":")) write(true, "fo", strings.Join(r.FailureReportingOptions, ":"))
} }
if len(r.ReportingFormat) > 1 || len(r.ReportingFormat) == 1 && !strings.EqualFold(r.ReportingFormat[0], "afrf") { if len(r.ReportingFormat) > 1 || (len(r.ReportingFormat) == 1 && strings.EqualFold(r.ReportingFormat[0], "afrf")) {
write(true, "rf", strings.Join(r.FailureReportingOptions, ":")) write(true, "rf", strings.Join(r.FailureReportingOptions, ":"))
} }
write(r.Percentage != 100, "pct", fmt.Sprintf("%d", r.Percentage)) write(r.Percentage != 100, "pct", fmt.Sprintf("%d", r.Percentage))

View File

@ -1,8 +1,17 @@
// Package dmarcdb stores incoming DMARC reports.
//
// With DMARC, a domain can request emails with DMARC verification results by
// remote mail servers to be sent to a specified address. Mox parses such
// reports, stores them in its database and makes them available through its
// admin web interface.
package dmarcdb package dmarcdb
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"path/filepath"
"sync"
"time" "time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
@ -12,11 +21,16 @@ import (
"github.com/mjl-/mox/dmarcrpt" "github.com/mjl-/mox/dmarcrpt"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
) )
var xlog = mlog.New("dmarcdb")
var ( var (
ReportsDBTypes = []any{DomainFeedback{}} // Types stored in DB. DBTypes = []any{DomainFeedback{}} // Types stored in DB.
ReportsDB *bstore.DB // Exported for backups. DB *bstore.DB // Exported for backups.
mutex sync.Mutex
) )
var ( var (
@ -54,18 +68,44 @@ type DomainFeedback struct {
dmarcrpt.Feedback dmarcrpt.Feedback
} }
func database(ctx context.Context) (rdb *bstore.DB, rerr error) {
mutex.Lock()
defer mutex.Unlock()
if DB == nil {
p := mox.DataDirPath("dmarcrpt.db")
os.MkdirAll(filepath.Dir(p), 0770)
db, err := bstore.Open(ctx, p, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, DBTypes...)
if err != nil {
return nil, err
}
DB = db
}
return DB, nil
}
// Init opens the database.
func Init() error {
_, err := database(mox.Shutdown)
return err
}
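
A rough sketch of how the package is used, assuming the mox configuration is already loaded so the data directory is known; the report values are made up:

package main

import (
	"context"
	"log"

	"github.com/mjl-/mox/dmarcdb"
	"github.com/mjl-/mox/dmarcrpt"
	"github.com/mjl-/mox/dns"
)

func main() {
	// Open (or create) the reports database under the configured data directory.
	if err := dmarcdb.Init(); err != nil {
		log.Fatalf("opening dmarc reports database: %v", err)
	}

	// Store a parsed aggregate report; fromDomain is the domain of the report
	// message's From header.
	feedback := &dmarcrpt.Feedback{
		PolicyPublished: dmarcrpt.PolicyPublished{Domain: "sender.example"},
	}
	fromDomain := dns.Domain{ASCII: "mailer.sender.example"}
	if err := dmarcdb.AddReport(context.Background(), feedback, fromDomain); err != nil {
		log.Fatalf("storing dmarc report: %v", err)
	}

	// List everything stored so far.
	reports, err := dmarcdb.Records(context.Background())
	if err != nil {
		log.Fatalf("listing dmarc reports: %v", err)
	}
	log.Printf("%d reports stored", len(reports))
}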
// AddReport adds a DMARC aggregate feedback report from an email to the database, // AddReport adds a DMARC aggregate feedback report from an email to the database,
// and updates prometheus metrics. // and updates prometheus metrics.
// //
// fromDomain is the domain in the report message From header. // fromDomain is the domain in the report message From header.
func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error { func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain) error {
db, err := database(ctx)
if err != nil {
return err
}
d, err := dns.ParseDomain(f.PolicyPublished.Domain) d, err := dns.ParseDomain(f.PolicyPublished.Domain)
if err != nil { if err != nil {
return fmt.Errorf("parsing domain in report: %v", err) return fmt.Errorf("parsing domain in report: %v", err)
} }
df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f} df := DomainFeedback{0, d.Name(), fromDomain.Name(), *f}
if err := ReportsDB.Insert(ctx, &df); err != nil { if err := db.Insert(ctx, &df); err != nil {
return err return err
} }
@ -104,23 +144,38 @@ func AddReport(ctx context.Context, f *dmarcrpt.Feedback, fromDomain dns.Domain)
// Records returns all reports in the database. // Records returns all reports in the database.
func Records(ctx context.Context) ([]DomainFeedback, error) { func Records(ctx context.Context) ([]DomainFeedback, error) {
return bstore.QueryDB[DomainFeedback](ctx, ReportsDB).List() db, err := database(ctx)
if err != nil {
return nil, err
}
return bstore.QueryDB[DomainFeedback](ctx, db).List()
} }
// RecordID returns the report for the ID. // RecordID returns the report for the ID.
func RecordID(ctx context.Context, id int64) (DomainFeedback, error) { func RecordID(ctx context.Context, id int64) (DomainFeedback, error) {
db, err := database(ctx)
if err != nil {
return DomainFeedback{}, err
}
e := DomainFeedback{ID: id} e := DomainFeedback{ID: id}
err := ReportsDB.Get(ctx, &e) err = db.Get(ctx, &e)
return e, err return e, err
} }
// RecordsPeriodDomain returns the reports overlapping start and end, for the given // RecordsPeriodDomain returns the reports overlapping start and end, for the given
// domain. If domain is empty, all records match for domain. // domain. If domain is empty, all records match for domain.
func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) { func RecordsPeriodDomain(ctx context.Context, start, end time.Time, domain string) ([]DomainFeedback, error) {
db, err := database(ctx)
if err != nil {
return nil, err
}
s := start.Unix() s := start.Unix()
e := end.Unix() e := end.Unix()
q := bstore.QueryDB[DomainFeedback](ctx, ReportsDB) q := bstore.QueryDB[DomainFeedback](ctx, db)
if domain != "" { if domain != "" {
q.FilterNonzero(DomainFeedback{Domain: domain}) q.FilterNonzero(DomainFeedback{Domain: domain})
} }

View File

@ -17,16 +17,16 @@ var ctxbg = context.Background()
func TestDMARCDB(t *testing.T) { func TestDMARCDB(t *testing.T) {
mox.Shutdown = ctxbg mox.Shutdown = ctxbg
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf") mox.ConfigStaticPath = "../testdata/dmarcdb/fake.conf"
mox.MustLoadConfig(true, false) mox.Conf.Static.DataDir = "."
os.Remove(mox.DataDirPath("dmarcrpt.db")) dbpath := mox.DataDirPath("dmarcrpt.db")
err := Init() os.MkdirAll(filepath.Dir(dbpath), 0770)
tcheckf(t, err, "init") defer os.Remove(dbpath)
defer func() {
err := Close() if err := Init(); err != nil {
tcheckf(t, err, "close") t.Fatalf("init database: %s", err)
}() }
feedback := &dmarcrpt.Feedback{ feedback := &dmarcrpt.Feedback{
ReportMetadata: dmarcrpt.ReportMetadata{ ReportMetadata: dmarcrpt.ReportMetadata{

View File

@ -1,77 +0,0 @@
// Package dmarcdb stores incoming DMARC aggregate reports and evaluations for outgoing aggregate reports.
//
// With DMARC, a domain can request reports with DMARC evaluation results to be
// sent to a specified address. Mox parses such reports, stores them in its
// database and makes them available through its admin web interface. Mox also
// keeps track of the evaluations it does for incoming messages and sends reports
// to mail servers that request reports.
//
// Only aggregate reports are stored and sent. Failure reports about individual
// messages are not implemented.
package dmarcdb
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
"github.com/mjl-/bstore"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxvar"
)
// Init opens the databases.
//
// The incoming reports and evaluations for outgoing reports are in separate
// databases for simpler file-based handling of the databases.
func Init() error {
if ReportsDB != nil || EvalDB != nil {
return fmt.Errorf("already initialized")
}
log := mlog.New("dmarcdb", nil)
var err error
ReportsDB, err = openReportsDB(mox.Shutdown, log)
if err != nil {
return fmt.Errorf("open reports db: %v", err)
}
EvalDB, err = openEvalDB(mox.Shutdown, log)
if err != nil {
return fmt.Errorf("open eval db: %v", err)
}
return nil
}
func Close() error {
if err := ReportsDB.Close(); err != nil {
return fmt.Errorf("closing reports db: %w", err)
}
ReportsDB = nil
if err := EvalDB.Close(); err != nil {
return fmt.Errorf("closing eval db: %w", err)
}
EvalDB = nil
return nil
}
func openReportsDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
p := mox.DataDirPath("dmarcrpt.db")
os.MkdirAll(filepath.Dir(p), 0770)
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
return bstore.Open(ctx, p, &opts, ReportsDBTypes...)
}
func openEvalDB(ctx context.Context, log mlog.Log) (*bstore.DB, error) {
p := mox.DataDirPath("dmarceval.db")
os.MkdirAll(filepath.Dir(p), 0770)
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: moxvar.RegisterLogger(p, log.Logger)}
return bstore.Open(ctx, p, &opts, EvalDBTypes...)
}

File diff suppressed because it is too large

View File

@ -1,403 +0,0 @@
package dmarcdb
import (
"context"
"encoding/json"
"encoding/xml"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/mjl-/mox/dmarcrpt"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
"github.com/mjl-/mox/queue"
"slices"
)
func tcheckf(t *testing.T, err error, format string, args ...any) {
t.Helper()
if err != nil {
t.Fatalf("%s: %s", fmt.Sprintf(format, args...), err)
}
}
func tcompare(t *testing.T, got, expect any) {
t.Helper()
if !reflect.DeepEqual(got, expect) {
t.Fatalf("got:\n%v\nexpected:\n%v", got, expect)
}
}
func TestEvaluations(t *testing.T) {
os.RemoveAll("../testdata/dmarcdb/data")
mox.Context = ctxbg
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
mox.MustLoadConfig(true, false)
os.Remove(mox.DataDirPath("dmarceval.db"))
err := Init()
tcheckf(t, err, "init")
defer func() {
err := Close()
tcheckf(t, err, "close")
}()
parseJSON := func(s string) (e Evaluation) {
t.Helper()
err := json.Unmarshal([]byte(s), &e)
tcheckf(t, err, "unmarshal")
return
}
packJSON := func(e Evaluation) string {
t.Helper()
buf, err := json.Marshal(e)
tcheckf(t, err, "marshal")
return string(buf)
}
e0 := Evaluation{
PolicyDomain: "sender1.example",
Evaluated: time.Now().Round(0),
IntervalHours: 1,
PolicyPublished: dmarcrpt.PolicyPublished{
Domain: "sender1.example",
ADKIM: dmarcrpt.AlignmentRelaxed,
ASPF: dmarcrpt.AlignmentRelaxed,
Policy: dmarcrpt.DispositionReject,
SubdomainPolicy: dmarcrpt.DispositionReject,
Percentage: 100,
},
SourceIP: "10.1.2.3",
Disposition: dmarcrpt.DispositionNone,
AlignedDKIMPass: true,
AlignedSPFPass: true,
EnvelopeTo: "mox.example",
EnvelopeFrom: "sender1.example",
HeaderFrom: "sender1.example",
DKIMResults: []dmarcrpt.DKIMAuthResult{
{
Domain: "sender1.example",
Selector: "test",
Result: dmarcrpt.DKIMPass,
},
},
SPFResults: []dmarcrpt.SPFAuthResult{
{
Domain: "sender1.example",
Scope: dmarcrpt.SPFDomainScopeMailFrom,
Result: dmarcrpt.SPFPass,
},
},
}
e1 := e0
e2 := parseJSON(strings.ReplaceAll(packJSON(e0), "sender1.example", "sender2.example"))
e3 := parseJSON(strings.ReplaceAll(packJSON(e0), "10.1.2.3", "10.3.2.1"))
e3.Optional = true
for i, e := range []*Evaluation{&e0, &e1, &e2, &e3} {
e.Evaluated = e.Evaluated.Add(time.Duration(i) * time.Second)
err = AddEvaluation(ctxbg, 3600, e)
tcheckf(t, err, "add evaluation")
}
expStats := map[string]EvaluationStat{
"sender1.example": {
Domain: dns.Domain{ASCII: "sender1.example"},
Dispositions: []string{"none"},
Count: 3,
SendReport: true,
},
"sender2.example": {
Domain: dns.Domain{ASCII: "sender2.example"},
Dispositions: []string{"none"},
Count: 1,
SendReport: true,
},
}
stats, err := EvaluationStats(ctxbg)
tcheckf(t, err, "evaluation stats")
tcompare(t, stats, expStats)
// EvaluationsDomain
evals, err := EvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender1.example"})
tcheckf(t, err, "get evaluations for domain")
tcompare(t, evals, []Evaluation{e0, e1, e3})
evals, err = EvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender2.example"})
tcheckf(t, err, "get evaluations for domain")
tcompare(t, evals, []Evaluation{e2})
evals, err = EvaluationsDomain(ctxbg, dns.Domain{ASCII: "bogus.example"})
tcheckf(t, err, "get evaluations for domain")
tcompare(t, evals, []Evaluation{})
// RemoveEvaluationsDomain
err = RemoveEvaluationsDomain(ctxbg, dns.Domain{ASCII: "sender1.example"})
tcheckf(t, err, "remove evaluations")
expStats = map[string]EvaluationStat{
"sender2.example": {
Domain: dns.Domain{ASCII: "sender2.example"},
Dispositions: []string{"none"},
Count: 1,
SendReport: true,
},
}
stats, err = EvaluationStats(ctxbg)
tcheckf(t, err, "evaluation stats")
tcompare(t, stats, expStats)
}
func TestSendReports(t *testing.T) {
os.RemoveAll("../testdata/dmarcdb/data")
mox.Context = ctxbg
mox.ConfigStaticPath = filepath.FromSlash("../testdata/dmarcdb/mox.conf")
mox.MustLoadConfig(true, false)
os.Remove(mox.DataDirPath("dmarceval.db"))
err := Init()
tcheckf(t, err, "init")
defer func() {
err := Close()
tcheckf(t, err, "close")
}()
resolver := dns.MockResolver{
TXT: map[string][]string{
"_dmarc.sender.example.": {
"v=DMARC1; rua=mailto:dmarcrpt@sender.example; ri=3600",
},
},
}
end := nextWholeHour(time.Now())
eval := Evaluation{
PolicyDomain: "sender.example",
Evaluated: end.Add(-time.Hour / 2),
IntervalHours: 1,
PolicyPublished: dmarcrpt.PolicyPublished{
Domain: "sender.example",
ADKIM: dmarcrpt.AlignmentRelaxed,
ASPF: dmarcrpt.AlignmentRelaxed,
Policy: dmarcrpt.DispositionReject,
SubdomainPolicy: dmarcrpt.DispositionReject,
Percentage: 100,
},
SourceIP: "10.1.2.3",
Disposition: dmarcrpt.DispositionNone,
AlignedDKIMPass: true,
AlignedSPFPass: true,
EnvelopeTo: "mox.example",
EnvelopeFrom: "sender.example",
HeaderFrom: "sender.example",
DKIMResults: []dmarcrpt.DKIMAuthResult{
{
Domain: "sender.example",
Selector: "test",
Result: dmarcrpt.DKIMPass,
},
},
SPFResults: []dmarcrpt.SPFAuthResult{
{
Domain: "sender.example",
Scope: dmarcrpt.SPFDomainScopeMailFrom,
Result: dmarcrpt.SPFPass,
},
},
}
expFeedback := &dmarcrpt.Feedback{
XMLName: xml.Name{Local: "feedback"},
Version: "1.0",
ReportMetadata: dmarcrpt.ReportMetadata{
OrgName: "mail.mox.example",
Email: "postmaster@mail.mox.example",
DateRange: dmarcrpt.DateRange{
Begin: end.Add(-1 * time.Hour).Unix(),
End: end.Add(-time.Second).Unix(),
},
},
PolicyPublished: dmarcrpt.PolicyPublished{
Domain: "sender.example",
ADKIM: dmarcrpt.AlignmentRelaxed,
ASPF: dmarcrpt.AlignmentRelaxed,
Policy: dmarcrpt.DispositionReject,
SubdomainPolicy: dmarcrpt.DispositionReject,
Percentage: 100,
},
Records: []dmarcrpt.ReportRecord{
{
Row: dmarcrpt.Row{
SourceIP: "10.1.2.3",
Count: 1,
PolicyEvaluated: dmarcrpt.PolicyEvaluated{
Disposition: dmarcrpt.DispositionNone,
DKIM: dmarcrpt.DMARCPass,
SPF: dmarcrpt.DMARCPass,
},
},
Identifiers: dmarcrpt.Identifiers{
EnvelopeTo: "mox.example",
EnvelopeFrom: "sender.example",
HeaderFrom: "sender.example",
},
AuthResults: dmarcrpt.AuthResults{
DKIM: []dmarcrpt.DKIMAuthResult{
{
Domain: "sender.example",
Selector: "test",
Result: dmarcrpt.DKIMPass,
},
},
SPF: []dmarcrpt.SPFAuthResult{
{
Domain: "sender.example",
Scope: dmarcrpt.SPFDomainScopeMailFrom,
Result: dmarcrpt.SPFPass,
},
},
},
},
},
}
// Set a timeUntil that we steplock and that causes the actual sleep to return immediately when we want to.
wait := make(chan struct{})
step := make(chan time.Duration)
jitteredTimeUntil = func(_ time.Time) time.Duration {
wait <- struct{}{}
return <-step
}
sleepBetween = func(ctx context.Context, between time.Duration) (ok bool) { return true }
test := func(evals []Evaluation, expAggrAddrs map[string]struct{}, expErrorAddrs map[string]struct{}, optExpReport *dmarcrpt.Feedback) {
t.Helper()
mox.Shutdown, mox.ShutdownCancel = context.WithCancel(ctxbg)
for _, e := range evals {
err := EvalDB.Insert(ctxbg, &e)
tcheckf(t, err, "inserting evaluation")
}
aggrAddrs := map[string]struct{}{}
errorAddrs := map[string]struct{}{}
queueAdd = func(ctx context.Context, log mlog.Log, senderAccount string, msgFile *os.File, qml ...queue.Msg) error {
if len(qml) != 1 {
return fmt.Errorf("queued %d messages, expected 1", len(qml))
}
qm := qml[0]
// Read message file. Also write copy to disk for inspection.
buf, err := io.ReadAll(&moxio.AtReader{R: msgFile})
tcheckf(t, err, "read report message")
err = os.WriteFile("../testdata/dmarcdb/data/report.eml", slices.Concat(qm.MsgPrefix, buf), 0600)
tcheckf(t, err, "write report message")
var feedback *dmarcrpt.Feedback
addr := qm.Recipient().String()
isErrorReport := strings.Contains(string(buf), "DMARC aggregate reporting error report")
if isErrorReport {
errorAddrs[addr] = struct{}{}
} else {
aggrAddrs[addr] = struct{}{}
feedback, err = dmarcrpt.ParseMessageReport(log.Logger, msgFile)
tcheckf(t, err, "parsing generated report message")
}
if optExpReport != nil {
// Parse report in message and compare with expected.
optExpReport.ReportMetadata.ReportID = feedback.ReportMetadata.ReportID
tcompare(t, feedback, expFeedback)
}
return nil
}
Start(resolver)
// Run first loop.
<-wait
step <- 0
<-wait
tcompare(t, aggrAddrs, expAggrAddrs)
tcompare(t, errorAddrs, expErrorAddrs)
// Second loop. Evaluations cleaned, should not result in report messages.
aggrAddrs = map[string]struct{}{}
errorAddrs = map[string]struct{}{}
step <- 0
<-wait
tcompare(t, aggrAddrs, map[string]struct{}{})
tcompare(t, errorAddrs, map[string]struct{}{})
// Cause Start to stop.
mox.ShutdownCancel()
step <- time.Minute
}
// Typical case, with a single address that receives an aggregate report.
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)
// Only optional evaluations, no report at all.
evalOpt := eval
evalOpt.Optional = true
test([]Evaluation{evalOpt}, map[string]struct{}{}, map[string]struct{}{}, nil)
// Address is suppressed.
sa := SuppressAddress{ReportingAddress: "dmarcrpt@sender.example", Until: time.Now().Add(time.Minute)}
err = EvalDB.Insert(ctxbg, &sa)
tcheckf(t, err, "insert suppress address")
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
// Suppression has expired.
sa.Until = time.Now().Add(-time.Minute)
err = EvalDB.Update(ctxbg, &sa)
tcheckf(t, err, "update suppress address")
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt@sender.example": {}}, map[string]struct{}{}, expFeedback)
// Two RUA's, one with a size limit that doesn't pass, and one that does pass.
resolver.TXT["_dmarc.sender.example."] = []string{"v=DMARC1; rua=mailto:dmarcrpt1@sender.example!1,mailto:dmarcrpt2@sender.example!10t; ri=3600"}
test([]Evaluation{eval}, map[string]struct{}{"dmarcrpt2@sender.example": {}}, map[string]struct{}{}, nil)
// Redirect to external domain, without permission, no report sent.
resolver.TXT["_dmarc.sender.example."] = []string{"v=DMARC1; rua=mailto:unauthorized@other.example"}
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
// Redirect to external domain, with basic permission.
resolver.TXT = map[string][]string{
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:authorized@other.example"},
"sender.example._report._dmarc.other.example.": {"v=DMARC1"},
}
test([]Evaluation{eval}, map[string]struct{}{"authorized@other.example": {}}, map[string]struct{}{}, nil)
// Redirect to authorized external domain, with 2 allowed replacements and 1 invalid and 1 refusing due to size.
resolver.TXT = map[string][]string{
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:authorized@other.example"},
"sender.example._report._dmarc.other.example.": {"v=DMARC1; rua=mailto:good1@other.example,mailto:bad1@yetanother.example,mailto:good2@other.example,mailto:badsize@other.example!1"},
}
test([]Evaluation{eval}, map[string]struct{}{"good1@other.example": {}, "good2@other.example": {}}, map[string]struct{}{}, nil)
// Without RUA, we send no message.
resolver.TXT = map[string][]string{
"_dmarc.sender.example.": {"v=DMARC1;"},
}
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{}, nil)
// If the message size limit is reached, an error report is sent.
resolver.TXT = map[string][]string{
"_dmarc.sender.example.": {"v=DMARC1; rua=mailto:dmarcrpt@sender.example!1"},
}
test([]Evaluation{eval}, map[string]struct{}{}, map[string]struct{}{"dmarcrpt@sender.example": {}}, nil)
}

View File

@ -1,17 +0,0 @@
package dmarcdb
import (
"fmt"
"os"
"testing"
"github.com/mjl-/mox/metrics"
)
func TestMain(m *testing.M) {
m.Run()
if metrics.Panics.Load() > 0 {
fmt.Println("unhandled panics encountered")
os.Exit(2)
}
}

View File

@ -1,14 +1,9 @@
package dmarcrpt package dmarcrpt
import (
"encoding/xml"
)
// Initially generated by xsdgen, then modified. // Initially generated by xsdgen, then modified.
// Feedback is the top-level XML field returned. // Feedback is the top-level XML field returned.
type Feedback struct { type Feedback struct {
XMLName xml.Name `xml:"feedback" json:"-"` // todo: removing the json tag triggers bug in sherpadoc, should fix.
Version string `xml:"version"` Version string `xml:"version"`
ReportMetadata ReportMetadata `xml:"report_metadata"` ReportMetadata ReportMetadata `xml:"report_metadata"`
PolicyPublished PolicyPublished `xml:"policy_published"` PolicyPublished PolicyPublished `xml:"policy_published"`
@ -31,9 +26,6 @@ type DateRange struct {
// PolicyPublished is the policy as found in DNS for the domain. // PolicyPublished is the policy as found in DNS for the domain.
type PolicyPublished struct { type PolicyPublished struct {
// Domain is where DMARC record was found, not necessarily message From. Reports we
// generate use unicode names, incoming reports may have either ASCII-only or
// Unicode domains.
Domain string `xml:"domain"` Domain string `xml:"domain"`
ADKIM Alignment `xml:"adkim,omitempty"` ADKIM Alignment `xml:"adkim,omitempty"`
ASPF Alignment `xml:"aspf,omitempty"` ASPF Alignment `xml:"aspf,omitempty"`
@ -47,8 +39,6 @@ type PolicyPublished struct {
type Alignment string type Alignment string
const ( const (
AlignmentAbsent Alignment = ""
AlignmentRelaxed Alignment = "r" // Subdomains match the DMARC from-domain. AlignmentRelaxed Alignment = "r" // Subdomains match the DMARC from-domain.
AlignmentStrict Alignment = "s" // Only exact from-domain match. AlignmentStrict Alignment = "s" // Only exact from-domain match.
) )
@ -58,8 +48,6 @@ const (
type Disposition string type Disposition string
const ( const (
DispositionAbsent Disposition = ""
DispositionNone Disposition = "none" DispositionNone Disposition = "none"
DispositionQuarantine Disposition = "quarantine" DispositionQuarantine Disposition = "quarantine"
DispositionReject Disposition = "reject" DispositionReject Disposition = "reject"
@ -91,8 +79,6 @@ type PolicyEvaluated struct {
type DMARCResult string type DMARCResult string
const ( const (
DMARCAbsent DMARCResult = ""
DMARCPass DMARCResult = "pass" DMARCPass DMARCResult = "pass"
DMARCFail DMARCResult = "fail" DMARCFail DMARCResult = "fail"
) )
@ -107,8 +93,6 @@ type PolicyOverrideReason struct {
type PolicyOverride string type PolicyOverride string
const ( const (
PolicyOverrideAbsent PolicyOverride = ""
PolicyOverrideForwarded PolicyOverride = "forwarded" PolicyOverrideForwarded PolicyOverride = "forwarded"
PolicyOverrideSampledOut PolicyOverride = "sampled_out" PolicyOverrideSampledOut PolicyOverride = "sampled_out"
PolicyOverrideTrustedForwarder PolicyOverride = "trusted_forwarder" PolicyOverrideTrustedForwarder PolicyOverride = "trusted_forwarder"
@ -138,8 +122,6 @@ type DKIMAuthResult struct {
type DKIMResult string type DKIMResult string
const ( const (
DKIMAbsent DKIMResult = ""
DKIMNone DKIMResult = "none" DKIMNone DKIMResult = "none"
DKIMPass DKIMResult = "pass" DKIMPass DKIMResult = "pass"
DKIMFail DKIMResult = "fail" DKIMFail DKIMResult = "fail"
@ -158,8 +140,6 @@ type SPFAuthResult struct {
type SPFDomainScope string type SPFDomainScope string
const ( const (
SPFDomainScopeAbsent SPFDomainScope = ""
SPFDomainScopeHelo SPFDomainScope = "helo" // SMTP EHLO SPFDomainScopeHelo SPFDomainScope = "helo" // SMTP EHLO
SPFDomainScopeMailFrom SPFDomainScope = "mfrom" // SMTP "MAIL FROM". SPFDomainScopeMailFrom SPFDomainScope = "mfrom" // SMTP "MAIL FROM".
) )
@ -167,8 +147,6 @@ const (
type SPFResult string type SPFResult string
const ( const (
SPFAbsent SPFResult = ""
SPFNone SPFResult = "none" SPFNone SPFResult = "none"
SPFNeutral SPFResult = "neutral" SPFNeutral SPFResult = "neutral"
SPFPass SPFResult = "pass" SPFPass SPFResult = "pass"

View File

@ -9,16 +9,14 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/http" "net/http"
"strings" "strings"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/moxio" "github.com/mjl-/mox/moxio"
) )
var ErrNoReport = errors.New("no dmarc aggregate report found in message") var ErrNoReport = errors.New("no dmarc report found in message")
// ParseReport parses an XML aggregate feedback report. // ParseReport parses an XML aggregate feedback report.
// The maximum report size is 20MB. // The maximum report size is 20MB.
@ -35,35 +33,34 @@ func ParseReport(r io.Reader) (*Feedback, error) {
// ParseMessageReport parses an aggregate feedback report from a mail message. The // ParseMessageReport parses an aggregate feedback report from a mail message. The
// maximum message size is 15MB, the maximum report size after decompression is // maximum message size is 15MB, the maximum report size after decompression is
// 20MB. // 20MB.
func ParseMessageReport(elog *slog.Logger, r io.ReaderAt) (*Feedback, error) { func ParseMessageReport(r io.ReaderAt) (*Feedback, error) {
log := mlog.New("dmarcrpt", elog)
// ../rfc/7489:1801 // ../rfc/7489:1801
p, err := message.Parse(log.Logger, true, &moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024}) p, err := message.Parse(&moxio.LimitAtReader{R: r, Limit: 15 * 1024 * 1024})
if err != nil { if err != nil {
return nil, fmt.Errorf("parsing mail message: %s", err) return nil, fmt.Errorf("parsing mail message: %s", err)
} }
return parseMessageReport(log, p) return parseMessageReport(p)
} }
func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) { func parseMessageReport(p message.Part) (*Feedback, error) {
// Pretty much any mime structure is allowed. ../rfc/7489:1861 // Pretty much any mime structure is allowed. ../rfc/7489:1861
// In practice, some parties will send the report as the only (non-multipart) // In practice, some parties will send the report as the only (non-multipart)
// content of the message. // content of the message.
if p.MediaType != "MULTIPART" { if p.MediaType != "MULTIPART" {
return parseReport(log, p) return parseReport(p)
} }
for { for {
sp, err := p.ParseNextPart(log.Logger) sp, err := p.ParseNextPart()
if err == io.EOF { if err == io.EOF {
return nil, ErrNoReport return nil, ErrNoReport
} }
if err != nil { if err != nil {
return nil, err return nil, err
} }
report, err := parseMessageReport(log, *sp) report, err := parseMessageReport(*sp)
if err == ErrNoReport { if err == ErrNoReport {
continue continue
} else if err != nil || report != nil { } else if err != nil || report != nil {
@ -72,12 +69,12 @@ func parseMessageReport(log mlog.Log, p message.Part) (*Feedback, error) {
} }
} }
func parseReport(log mlog.Log, p message.Part) (*Feedback, error) { func parseReport(p message.Part) (*Feedback, error) {
ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType) ct := strings.ToLower(p.MediaType + "/" + p.MediaSubType)
r := p.Reader() r := p.Reader()
// If no (useful) content-type is set, try to detect it. // If no (useful) content-type is set, try to detect it.
if ct == "" || ct == "application/octet-stream" { if ct == "" || ct == "application/octect-stream" {
data := make([]byte, 512) data := make([]byte, 512)
n, err := io.ReadFull(r, data) n, err := io.ReadFull(r, data)
if err == io.EOF { if err == io.EOF {
@ -93,8 +90,8 @@ func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
switch ct { switch ct {
case "application/zip": case "application/zip":
// Google sends messages with direct application/zip content-type. // Google sends messages with direct application/zip content-type.
return parseZip(log, r) return parseZip(r)
case "application/gzip", "application/x-gzip": case "application/gzip":
gzr, err := gzip.NewReader(r) gzr, err := gzip.NewReader(r)
if err != nil { if err != nil {
return nil, fmt.Errorf("decoding gzip xml report: %s", err) return nil, fmt.Errorf("decoding gzip xml report: %s", err)
@ -106,7 +103,7 @@ func parseReport(log mlog.Log, p message.Part) (*Feedback, error) {
return nil, ErrNoReport return nil, ErrNoReport
} }
func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) { func parseZip(r io.Reader) (*Feedback, error) {
buf, err := io.ReadAll(r) buf, err := io.ReadAll(r)
if err != nil { if err != nil {
return nil, fmt.Errorf("reading feedback: %s", err) return nil, fmt.Errorf("reading feedback: %s", err)
@ -122,9 +119,6 @@ func parseZip(log mlog.Log, r io.Reader) (*Feedback, error) {
if err != nil { if err != nil {
return nil, fmt.Errorf("opening file in zip: %s", err) return nil, fmt.Errorf("opening file in zip: %s", err)
} }
defer func() { defer f.Close()
err := f.Close()
log.Check(err, "closing report file in zip file")
}()
return ParseReport(f) return ParseReport(f)
} }
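
A sketch of feeding a stored report email to the parser, using the main-branch signature with the logger argument; the file path is made up:

package main

import (
	"log"
	"log/slog"
	"os"

	"github.com/mjl-/mox/dmarcrpt"
)

func main() {
	// A stored report email, e.g. one delivered to the rua= address.
	f, err := os.Open("report.eml")
	if err != nil {
		log.Fatalf("open report message: %v", err)
	}
	defer f.Close()

	feedback, err := dmarcrpt.ParseMessageReport(slog.Default(), f)
	if err == dmarcrpt.ErrNoReport {
		log.Fatalf("message contains no aggregate report")
	} else if err != nil {
		log.Fatalf("parsing report: %v", err)
	}
	log.Printf("report from %s about %s, %d records",
		feedback.ReportMetadata.OrgName, feedback.PolicyPublished.Domain, len(feedback.Records))
}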

View File

@ -1,18 +1,12 @@
package dmarcrpt package dmarcrpt
import ( import (
"encoding/xml"
"os" "os"
"path/filepath"
"reflect" "reflect"
"strings" "strings"
"testing" "testing"
"github.com/mjl-/mox/mlog"
) )
var pkglog = mlog.New("dmarcrpt", nil)
const reportExample = `<?xml version="1.0" encoding="UTF-8" ?> const reportExample = `<?xml version="1.0" encoding="UTF-8" ?>
<feedback> <feedback>
<report_metadata> <report_metadata>
@ -63,7 +57,6 @@ const reportExample = `<?xml version="1.0" encoding="UTF-8" ?>
func TestParseReport(t *testing.T) { func TestParseReport(t *testing.T) {
var expect = &Feedback{ var expect = &Feedback{
XMLName: xml.Name{Local: "feedback"},
ReportMetadata: ReportMetadata{ ReportMetadata: ReportMetadata{
OrgName: "google.com", OrgName: "google.com",
Email: "noreply-dmarc-support@google.com", Email: "noreply-dmarc-support@google.com",
@ -125,19 +118,19 @@ func TestParseReport(t *testing.T) {
} }
func TestParseMessageReport(t *testing.T) { func TestParseMessageReport(t *testing.T) {
dir := filepath.FromSlash("../testdata/dmarc-reports") const dir = "../testdata/dmarc-reports"
files, err := os.ReadDir(dir) files, err := os.ReadDir(dir)
if err != nil { if err != nil {
t.Fatalf("listing dmarc aggregate report emails: %s", err) t.Fatalf("listing dmarc report emails: %s", err)
} }
for _, file := range files { for _, file := range files {
p := filepath.Join(dir, file.Name()) p := dir + "/" + file.Name()
f, err := os.Open(p) f, err := os.Open(p)
if err != nil { if err != nil {
t.Fatalf("open %q: %s", p, err) t.Fatalf("open %q: %s", p, err)
} }
_, err = ParseMessageReport(pkglog.Logger, f) _, err = ParseMessageReport(f)
if err != nil { if err != nil {
t.Fatalf("ParseMessageReport: %q: %s", p, err) t.Fatalf("ParseMessageReport: %q: %s", p, err)
} }
@ -145,7 +138,7 @@ func TestParseMessageReport(t *testing.T) {
} }
// No report in a non-multipart message. // No report in a non-multipart message.
_, err = ParseMessageReport(pkglog.Logger, strings.NewReader("From: <mjl@mox.example>\r\n\r\nNo report.\r\n")) _, err = ParseMessageReport(strings.NewReader("From: <mjl@mox.example>\r\n\r\nNo report.\r\n"))
if err != ErrNoReport { if err != ErrNoReport {
t.Fatalf("message without report, got err %#v, expected ErrNoreport", err) t.Fatalf("message without report, got err %#v, expected ErrNoreport", err)
} }
@ -171,7 +164,7 @@ MIME-Version: 1.0
--===============5735553800636657282==-- --===============5735553800636657282==--
`, "\n", "\r\n") `, "\n", "\r\n")
_, err = ParseMessageReport(pkglog.Logger, strings.NewReader(multipartNoreport)) _, err = ParseMessageReport(strings.NewReader(multipartNoreport))
if err != ErrNoReport { if err != ErrNoReport {
t.Fatalf("message without report, got err %#v, expected ErrNoreport", err) t.Fatalf("message without report, got err %#v, expected ErrNoreport", err)
} }

View File

@ -9,31 +9,19 @@ import (
"strings" "strings"
"golang.org/x/net/idna" "golang.org/x/net/idna"
"github.com/mjl-/adns"
) )
// Pedantic enables stricter parsing. var errTrailingDot = errors.New("dns name has trailing dot")
var Pedantic bool
var (
errTrailingDot = errors.New("dns name has trailing dot")
errUnderscore = errors.New("domain name with underscore")
errIDNA = errors.New("idna")
errIPNotName = errors.New("ip address while name required")
)
// Domain is a domain name, with one or more labels, with at least an ASCII // Domain is a domain name, with one or more labels, with at least an ASCII
// representation, and for IDNA non-ASCII domains a unicode representation. // representation, and for IDNA non-ASCII domains a unicode representation.
// The ASCII string must be used for DNS lookups. The strings do not have a // The ASCII string must be used for DNS lookups.
// trailing dot. When using with StrictResolver, add the trailing dot.
type Domain struct { type Domain struct {
// A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved // A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved
// letters/digits/hyphens) labels. Always in lower case. No trailing dot. // letters/digits/hyphens) labels. Always in lower case.
ASCII string ASCII string
// Name as U-labels, in Unicode NFC. Empty if this is an ASCII-only domain. No // Name as U-labels. Empty if this is an ASCII-only domain.
// trailing dot.
Unicode string Unicode string
} }
@ -72,8 +60,7 @@ func (d Domain) String() string {
} }
// LogString returns a domain for logging. // LogString returns a domain for logging.
// For IDNA names, the string is the slash-separated Unicode and ASCII name. // For IDNA names, the string contains both the unicode and ASCII name.
// For ASCII-only domain names, just the ASCII string is returned.
func (d Domain) LogString() string { func (d Domain) LogString() string {
if d.Unicode == "" { if d.Unicode == "" {
return d.ASCII return d.ASCII
@ -90,26 +77,18 @@ func (d Domain) IsZero() bool {
// labels (unicode). // labels (unicode).
// Names are IDN-canonicalized and lower-cased. // Names are IDN-canonicalized and lower-cased.
// Characters in unicode can be replaced by equivalents. E.g. "Ⓡ" to "r". This // Characters in unicode can be replaced by equivalents. E.g. "Ⓡ" to "r". This
// means you should only compare parsed domain names, never unparsed strings // means you should only compare parsed domain names, never strings directly.
// directly.
func ParseDomain(s string) (Domain, error) { func ParseDomain(s string) (Domain, error) {
if strings.HasSuffix(s, ".") { if strings.HasSuffix(s, ".") {
return Domain{}, errTrailingDot return Domain{}, errTrailingDot
} }
// IPv4 addresses would be accepted by idna lookups. TLDs cannot be all numerical,
// so IP addresses are not valid DNS names.
if net.ParseIP(s) != nil {
return Domain{}, errIPNotName
}
ascii, err := idna.Lookup.ToASCII(s) ascii, err := idna.Lookup.ToASCII(s)
if err != nil { if err != nil {
return Domain{}, fmt.Errorf("%w: to ascii: %v", errIDNA, err) return Domain{}, fmt.Errorf("to ascii: %w", err)
} }
unicode, err := idna.Lookup.ToUnicode(s) unicode, err := idna.Lookup.ToUnicode(s)
if err != nil { if err != nil {
return Domain{}, fmt.Errorf("%w: to unicode: %w", errIDNA, err) return Domain{}, fmt.Errorf("to unicode: %w", err)
} }
// todo: should we cause errors for unicode domains that were not in // todo: should we cause errors for unicode domains that were not in
// canonical form? we are now accepting all kinds of obscure spellings // canonical form? we are now accepting all kinds of obscure spellings
@ -121,54 +100,16 @@ func ParseDomain(s string) (Domain, error) {
return Domain{ascii, unicode}, nil return Domain{ascii, unicode}, nil
} }
// ParseDomainLax parses a domain like ParseDomain, but allows labels with // IsNotFound returns whether an error is a net.DNSError with IsNotFound set.
// underscores if the entire domain name is ASCII-only non-IDNA and Pedantic mode
// is not enabled. Used for interoperability, e.g. domains may specify MX
// targets with underscores.
func ParseDomainLax(s string) (Domain, error) {
if Pedantic || !strings.Contains(s, "_") {
return ParseDomain(s)
}
// If there is any non-ASCII, this is certainly not an A-label-only domain.
s = strings.ToLower(s)
for _, c := range s {
if c >= 0x80 {
return Domain{}, fmt.Errorf("%w: underscore and non-ascii not allowed", errUnderscore)
}
}
// Try parsing with underscores replaced with allowed ASCII character.
// If that's not valid, the version with underscore isn't either.
repl := strings.ReplaceAll(s, "_", "a")
d, err := ParseDomain(repl)
if err != nil {
return Domain{}, fmt.Errorf("%w: %v", errUnderscore, err)
}
// If we found an IDNA domain, we're not going to allow it.
if d.Unicode != "" {
return Domain{}, fmt.Errorf("%w: idna domain with underscores not allowed", errUnderscore)
}
// Just to be safe, ensure no unexpected conversions happened.
if d.ASCII != repl {
return Domain{}, fmt.Errorf("%w: underscores and non-canonical names not allowed", errUnderscore)
}
return Domain{ASCII: s}, nil
}
// IsNotFound returns whether an error is an adns.DNSError or net.DNSError with
// IsNotFound set.
//
// IsNotFound means the requested type does not exist for the given domain (a // IsNotFound means the requested type does not exist for the given domain (a
// nodata or nxdomain response). It doesn't necessarily mean no other types for // nodata or nxdomain response). It doesn't necessarily mean no other types
// that name exist. // for that name exist.
// //
// A DNS server can respond to a lookup with an error "nxdomain" to indicate a // A DNS server can respond to a lookup with an error "nxdomain" to indicate a
// name does not exist (at all), or with a success status with an empty list. // name does not exist (at all), or with a success status with an empty list.
// The adns resolver (just like the Go resolver) returns an IsNotFound error for // The Go resolver returns an IsNotFound error for both cases, there is no need
// both cases, there is no need to explicitly check for zero entries. // to explicitly check for zero entries.
func IsNotFound(err error) bool { func IsNotFound(err error) bool {
var adnsErr *adns.DNSError
var dnsErr *net.DNSError var dnsErr *net.DNSError
return err != nil && (errors.As(err, &adnsErr) && adnsErr.IsNotFound || errors.As(err, &dnsErr) && dnsErr.IsNotFound) return err != nil && errors.As(err, &dnsErr) && dnsErr.IsNotFound
} }
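
As a usage sketch for the IsNotFound behaviour described above (assuming the main-branch signatures, where lookups also return an adns.Result and require absolute names), a caller can treat nxdomain/nodata as an empty answer and everything else as a hard error:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/mjl-/mox/dns"
)

// lookupTXT returns the TXT records for an absolute name, or nil without an
// error when the name has no TXT records (nxdomain or nodata).
func lookupTXT(ctx context.Context, name string) ([]string, error) {
	resolver := dns.StrictResolver{Pkg: "example"}
	txts, _, err := resolver.LookupTXT(ctx, name)
	if dns.IsNotFound(err) {
		return nil, nil
	} else if err != nil {
		return nil, fmt.Errorf("txt lookup: %w", err)
	}
	return txts, nil
}

func main() {
	txts, err := lookupTXT(context.Background(), "example.com.")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(txts)
}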

View File

@ -6,15 +6,9 @@ import (
) )
func TestParseDomain(t *testing.T) { func TestParseDomain(t *testing.T) {
test := func(lax bool, s string, exp Domain, expErr error) { test := func(s string, exp Domain, expErr error) {
t.Helper() t.Helper()
var dom Domain dom, err := ParseDomain(s)
var err error
if lax {
dom, err = ParseDomainLax(s)
} else {
dom, err = ParseDomain(s)
}
if (err == nil) != (expErr == nil) || expErr != nil && !errors.Is(err, expErr) { if (err == nil) != (expErr == nil) || expErr != nil && !errors.Is(err, expErr) {
t.Fatalf("parse domain %q: err %v, expected %v", s, err, expErr) t.Fatalf("parse domain %q: err %v, expected %v", s, err, expErr)
} }
@ -24,15 +18,10 @@ func TestParseDomain(t *testing.T) {
} }
// We rely on normalization of names throughout the code base. // We rely on normalization of names throughout the code base.
test(false, "xmox.nl", Domain{"xmox.nl", ""}, nil) test("xmox.nl", Domain{"xmox.nl", ""}, nil)
test(false, "XMOX.NL", Domain{"xmox.nl", ""}, nil) test("XMOX.NL", Domain{"xmox.nl", ""}, nil)
test(false, "TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil) test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
test(false, "TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil) test("TEST☺.XMOX.NL", Domain{"xn--test-3o3b.xmox.nl", "test☺.xmox.nl"}, nil)
test(false, "ℂᵤⓇℒ。𝐒🄴", Domain{"curl.se", ""}, nil) // https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/ test("ℂᵤⓇℒ。𝐒🄴", Domain{"curl.se", ""}, nil) // https://daniel.haxx.se/blog/2022/12/14/idn-is-crazy/
test(false, "xmox.nl.", Domain{}, errTrailingDot) test("xmox.nl.", Domain{}, errTrailingDot)
test(false, "_underscore.xmox.nl", Domain{}, errIDNA)
test(true, "_underscore.xmox.NL", Domain{ASCII: "_underscore.xmox.nl"}, nil)
test(true, "_underscore.☺.xmox.nl", Domain{}, errUnderscore)
test(true, "_underscore.xn--test-3o3b.xmox.nl", Domain{}, errUnderscore)
} }

View File

@ -1,36 +0,0 @@
package dns_test
import (
"fmt"
"log"
"github.com/mjl-/mox/dns"
)
func ExampleParseDomain() {
// ASCII-only domain.
basic, err := dns.ParseDomain("example.com")
if err != nil {
log.Fatalf("parse domain: %v", err)
}
fmt.Printf("%s\n", basic)
// IDNA domain xn--74h.example.
smile, err := dns.ParseDomain("☺.example")
if err != nil {
log.Fatalf("parse domain: %v", err)
}
fmt.Printf("%s\n", smile)
// ASCII only domain curl.se in surprisingly allowed spelling.
surprising, err := dns.ParseDomain("ℂᵤⓇℒ。𝐒🄴")
if err != nil {
log.Fatalf("parse domain: %v", err)
}
fmt.Printf("%s\n", surprising)
// Output:
// example.com
// ☺.example/xn--74h.example
// curl.se
}

View File

@ -4,249 +4,183 @@ import (
"context" "context"
"fmt" "fmt"
"net" "net"
"slices"
"github.com/mjl-/adns"
) )
// MockResolver is a Resolver used for testing. // MockResolver is a Resolver used for testing.
// Set DNS records in the fields, which map FQDNs (with trailing dot) to values. // Set DNS records in the fields, which map FQDNs (with trailing dot) to values.
type MockResolver struct { type MockResolver struct {
PTR map[string][]string PTR map[string][]string
A map[string][]string A map[string][]string
AAAA map[string][]string AAAA map[string][]string
TXT map[string][]string TXT map[string][]string
MX map[string][]*net.MX MX map[string][]*net.MX
TLSA map[string][]adns.TLSA // Keys are e.g. _25._tcp.<host>. CNAME map[string]string
CNAME map[string]string Fail map[Mockreq]struct{}
Fail []string // Records of the form "type name", e.g. "cname localhost." that will return a servfail.
AllAuthentic bool // Default value for authentic in responses. Overridden with Authentic and Inauthentic
Authentic []string // Like Fail, but records that cause the response to be authentic.
Inauthentic []string // Like Authentic, but making response inauthentic.
} }
type mockReq struct { type Mockreq struct {
Type string // E.g. "cname", "txt", "mx", "ptr", etc. Type string // E.g. "cname", "txt", "mx", "ptr", etc.
Name string // Name of request. For TLSA, the full requested DNS name, e.g. _25._tcp.<host>. Name string
}
func (mr mockReq) String() string {
return mr.Type + " " + mr.Name
} }
var _ Resolver = MockResolver{} var _ Resolver = MockResolver{}
func (r MockResolver) result(ctx context.Context, mr mockReq) (string, adns.Result, error) { func (r MockResolver) nxdomain(s string) *net.DNSError {
result := adns.Result{Authentic: r.AllAuthentic} return &net.DNSError{
if err := ctx.Err(); err != nil {
return "", result, err
}
updateAuthentic := func(mock string) {
if slices.Contains(r.Authentic, mock) {
result.Authentic = true
}
if slices.Contains(r.Inauthentic, mock) {
result.Authentic = false
}
}
for {
if slices.Contains(r.Fail, mr.String()) {
updateAuthentic(mr.String())
return mr.Name, adns.Result{}, r.servfail(mr.Name)
}
cname, ok := r.CNAME[mr.Name]
if !ok {
updateAuthentic(mr.String())
break
}
updateAuthentic("cname " + mr.Name)
if mr.Type == "cname" {
return mr.Name, result, nil
}
mr.Name = cname
}
return mr.Name, result, nil
}
func (r MockResolver) nxdomain(s string) error {
return &adns.DNSError{
Err: "no record", Err: "no record",
Name: s, Name: s,
Server: "mock", Server: "localhost",
IsNotFound: true, IsNotFound: true,
} }
} }
func (r MockResolver) servfail(s string) error { func (r MockResolver) servfail(s string) *net.DNSError {
return &adns.DNSError{ return &net.DNSError{
Err: "temp error", Err: "temp error",
Name: s, Name: s,
Server: "mock", Server: "localhost",
IsTemporary: true, IsTemporary: true,
} }
} }
func (r MockResolver) LookupCNAME(ctx context.Context, name string) (string, error) {
if err := ctx.Err(); err != nil {
return "", err
}
if _, ok := r.Fail[Mockreq{"cname", name}]; ok {
return "", r.servfail(name)
}
if cname, ok := r.CNAME[name]; ok {
return cname, nil
}
return "", r.nxdomain(name)
}
func (r MockResolver) LookupAddr(ctx context.Context, ip string) ([]string, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if _, ok := r.Fail[Mockreq{"ptr", ip}]; ok {
return nil, r.servfail(ip)
}
l, ok := r.PTR[ip]
if !ok {
return nil, r.nxdomain(ip)
}
return l, nil
}
func (r MockResolver) LookupNS(ctx context.Context, name string) ([]*net.NS, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
return nil, r.servfail("ns not implemented")
}
func (r MockResolver) LookupPort(ctx context.Context, network, service string) (port int, err error) { func (r MockResolver) LookupPort(ctx context.Context, network, service string) (port int, err error) {
if err := ctx.Err(); err != nil { if err := ctx.Err(); err != nil {
return 0, err return 0, err
} }
return net.LookupPort(network, service) return 0, r.servfail("port not implemented")
} }
func (r MockResolver) LookupCNAME(ctx context.Context, name string) (string, adns.Result, error) { func (r MockResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
mr := mockReq{"cname", name} if err := ctx.Err(); err != nil {
name, result, err := r.result(ctx, mr) return "", nil, err
if err != nil {
return name, result, err
} }
cname, ok := r.CNAME[name] return "", nil, r.servfail("srv not implemented")
if !ok {
return cname, result, r.nxdomain(name)
}
return cname, result, nil
} }
func (r MockResolver) LookupAddr(ctx context.Context, ip string) ([]string, adns.Result, error) { func (r MockResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) {
mr := mockReq{"ptr", ip} if err := ctx.Err(); err != nil {
_, result, err := r.result(ctx, mr) return nil, err
if err != nil {
return nil, result, err
} }
l, ok := r.PTR[ip] if _, ok := r.Fail[Mockreq{"ipaddr", host}]; ok {
if !ok { return nil, r.servfail(host)
return nil, result, r.nxdomain(ip)
} }
return l, result, nil addrs, err := r.LookupHost(ctx, host)
}
func (r MockResolver) LookupNS(ctx context.Context, name string) ([]*net.NS, adns.Result, error) {
mr := mockReq{"ns", name}
_, result, err := r.result(ctx, mr)
if err != nil { if err != nil {
return nil, result, err return nil, err
}
return nil, result, r.servfail("ns not implemented")
}
func (r MockResolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, adns.Result, error) {
xname := fmt.Sprintf("_%s._%s.%s", service, proto, name)
mr := mockReq{"srv", xname}
name, result, err := r.result(ctx, mr)
if err != nil {
return name, nil, result, err
}
return name, nil, result, r.servfail("srv not implemented")
}
func (r MockResolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, adns.Result, error) {
// todo: make closer to resolver, doing a & aaaa lookups, including their error/(in)secure status.
mr := mockReq{"ipaddr", host}
_, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
addrs, result1, err := r.LookupHost(ctx, host)
result.Authentic = result.Authentic && result1.Authentic
if err != nil {
return nil, result, err
} }
ips := make([]net.IPAddr, len(addrs)) ips := make([]net.IPAddr, len(addrs))
for i, a := range addrs { for i, a := range addrs {
ip := net.ParseIP(a) ip := net.ParseIP(a)
if ip == nil { if ip == nil {
return nil, result, fmt.Errorf("malformed ip %q", a) return nil, fmt.Errorf("malformed ip %q", a)
} }
ips[i] = net.IPAddr{IP: ip} ips[i] = net.IPAddr{IP: ip}
} }
return ips, result, nil return ips, nil
} }
func (r MockResolver) LookupHost(ctx context.Context, host string) ([]string, adns.Result, error) { func (r MockResolver) LookupHost(ctx context.Context, host string) (addrs []string, err error) {
// todo: make closer to resolver, doing a & aaaa lookups, including their error/(in)secure status. if err := ctx.Err(); err != nil {
mr := mockReq{"host", host} return nil, err
_, result, err := r.result(ctx, mr) }
if err != nil { if _, ok := r.Fail[Mockreq{"host", host}]; ok {
return nil, result, err return nil, r.servfail(host)
} }
var addrs []string
addrs = append(addrs, r.A[host]...) addrs = append(addrs, r.A[host]...)
addrs = append(addrs, r.AAAA[host]...) addrs = append(addrs, r.AAAA[host]...)
if len(addrs) == 0 { if len(addrs) > 0 {
return nil, result, r.nxdomain(host) return addrs, nil
} }
return addrs, result, nil if cname, ok := r.CNAME[host]; ok {
return []string{cname}, nil
}
return nil, r.nxdomain(host)
} }
func (r MockResolver) LookupIP(ctx context.Context, network, host string) ([]net.IP, adns.Result, error) { func (r MockResolver) LookupIP(ctx context.Context, network, host string) ([]net.IP, error) {
mr := mockReq{"ip", host} if err := ctx.Err(); err != nil {
name, result, err := r.result(ctx, mr) return nil, err
if err != nil { }
return nil, result, err if _, ok := r.Fail[Mockreq{"ip", host}]; ok {
return nil, r.servfail(host)
} }
var ips []net.IP var ips []net.IP
switch network { switch network {
case "ip", "ip4": case "ip", "ip4":
for _, ip := range r.A[name] { for _, ip := range r.A[host] {
ips = append(ips, net.ParseIP(ip)) ips = append(ips, net.ParseIP(ip))
} }
} }
switch network { switch network {
case "ip", "ip6": case "ip", "ip6":
for _, ip := range r.AAAA[name] { for _, ip := range r.AAAA[host] {
ips = append(ips, net.ParseIP(ip)) ips = append(ips, net.ParseIP(ip))
} }
} }
if len(ips) == 0 { if len(ips) == 0 {
return nil, result, r.nxdomain(host) return nil, r.nxdomain(host)
} }
return ips, result, nil return ips, nil
} }
func (r MockResolver) LookupMX(ctx context.Context, name string) ([]*net.MX, adns.Result, error) { func (r MockResolver) LookupMX(ctx context.Context, name string) ([]*net.MX, error) {
mr := mockReq{"mx", name} if err := ctx.Err(); err != nil {
name, result, err := r.result(ctx, mr) return nil, err
if err != nil { }
return nil, result, err if _, ok := r.Fail[Mockreq{"mx", name}]; ok {
return nil, r.servfail(name)
} }
l, ok := r.MX[name] l, ok := r.MX[name]
if !ok { if !ok {
return nil, result, r.nxdomain(name) return nil, r.nxdomain(name)
} }
return l, result, nil return l, nil
} }
func (r MockResolver) LookupTXT(ctx context.Context, name string) ([]string, adns.Result, error) { func (r MockResolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
mr := mockReq{"txt", name} if err := ctx.Err(); err != nil {
name, result, err := r.result(ctx, mr) return nil, err
if err != nil { }
return nil, result, err if _, ok := r.Fail[Mockreq{"txt", name}]; ok {
return nil, r.servfail(name)
} }
l, ok := r.TXT[name] l, ok := r.TXT[name]
if !ok { if !ok {
return nil, result, r.nxdomain(name) return nil, r.nxdomain(name)
} }
return l, result, nil return l, nil
}
func (r MockResolver) LookupTLSA(ctx context.Context, port int, protocol string, host string) ([]adns.TLSA, adns.Result, error) {
var name string
if port == 0 && protocol == "" {
name = host
} else {
name = fmt.Sprintf("_%d._%s.%s", port, protocol, host)
}
mr := mockReq{"tlsa", name}
name, result, err := r.result(ctx, mr)
if err != nil {
return nil, result, err
}
l, ok := r.TLSA[name]
if !ok {
return nil, result, r.nxdomain(name)
}
return l, result, nil
} }
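
A minimal sketch of wiring MockResolver into a test, following the main-branch field layout and lookup signatures shown above (trailing-dot keys, Fail entries of the form "type name", results carrying an adns.Result):

package dns_test

import (
	"context"
	"testing"

	"github.com/mjl-/mox/dns"
)

func TestWithMockResolver(t *testing.T) {
	resolver := dns.MockResolver{
		// Keys are fully-qualified names with a trailing dot.
		A:            map[string][]string{"mail.example.com.": {"10.0.0.1"}},
		TXT:          map[string][]string{"example.com.": {"v=spf1 -all"}},
		AllAuthentic: true,                                // responses are DNSSEC-authentic by default
		Fail:         []string{"txt broken.example.com."}, // "type name" entries that return a servfail
	}

	txts, result, err := resolver.LookupTXT(context.Background(), "example.com.")
	if err != nil || len(txts) != 1 || !result.Authentic {
		t.Fatalf("unexpected lookup outcome: %v, %v, %v", txts, result, err)
	}
	if _, _, err := resolver.LookupTXT(context.Background(), "broken.example.com."); err == nil {
		t.Fatalf("expected servfail for failing record")
	}
}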

View File

@ -3,45 +3,50 @@ package dns
import ( import (
"context" "context"
"errors" "errors"
"fmt"
"log/slog"
"net" "net"
"os" "os"
"runtime"
"strings" "strings"
"time" "time"
"github.com/mjl-/adns" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/stub"
) )
// todo future: replace with a dnssec capable resolver // todo future: replace with a dnssec capable resolver
// todo future: change to interface that is closer to DNS. 1. expose nxdomain vs success with zero entries: nxdomain means the name does not exist for any dns resource record type, success with zero records means the name exists for other types than the requested type; 2. add ability to not follow cname records when resolving. the net resolver automatically follows cnames for LookupHost, LookupIP, LookupIPAddr. when resolving names found in mx records, we explicitly must not follow cnames. that seems impossible at the moment. 3. when looking up a cname, actually lookup the record? "net" LookupCNAME will return the requested name with no error if there is no CNAME record. because it returns the canonical name. // todo future: change to interface that is closer to DNS. 1. expose nxdomain vs success with zero entries: nxdomain means the name does not exist for any dns resource record type, success with zero records means the name exists for other types than the requested type; 2. add ability to not follow cname records when resolving. the net resolver automatically follows cnames for LookupHost, LookupIP, LookupIPAddr. when resolving names found in mx records, we explicitly must not follow cnames. that seems impossible at the moment. 3. when looking up a cname, actually lookup the record? "net" LookupCNAME will return the requested name with no error if there is no CNAME record. because it returns the canonical name.
// todo future: add option to not use anything in the cache, for the admin pages where you check the latest DNS settings, ignoring old cached info. // todo future: add option to not use anything in the cache, for the admin pages where you check the latest DNS settings, ignoring old cached info.
func init() { var xlog = mlog.New("dns")
net.DefaultResolver.StrictErrors = true
}
var ( var (
MetricLookup stub.HistogramVec = stub.HistogramVecIgnore{} metricLookup = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "mox_dns_lookup_duration_seconds",
Help: "DNS lookups.",
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20, 30},
},
[]string{
"pkg",
"type", // Lower-case Resolver method name without leading Lookup.
"result", // ok, nxdomain, temporary, timeout, canceled, error
},
)
) )
// Resolver is the interface strict resolver implements. // Resolver is the interface strict resolver implements.
type Resolver interface { type Resolver interface {
LookupAddr(ctx context.Context, addr string) ([]string, error)
LookupCNAME(ctx context.Context, host string) (string, error) // NOTE: returns an error if no CNAME record is present.
LookupHost(ctx context.Context, host string) (addrs []string, err error)
LookupIP(ctx context.Context, network, host string) ([]net.IP, error)
LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error)
LookupMX(ctx context.Context, name string) ([]*net.MX, error)
LookupNS(ctx context.Context, name string) ([]*net.NS, error)
LookupPort(ctx context.Context, network, service string) (port int, err error) LookupPort(ctx context.Context, network, service string) (port int, err error)
LookupAddr(ctx context.Context, addr string) ([]string, adns.Result, error) // Always returns absolute names, with trailing dot. LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error)
LookupCNAME(ctx context.Context, host string) (string, adns.Result, error) // NOTE: returns an error if no CNAME record is present. LookupTXT(ctx context.Context, name string) ([]string, error)
LookupHost(ctx context.Context, host string) ([]string, adns.Result, error)
LookupIP(ctx context.Context, network, host string) ([]net.IP, adns.Result, error)
LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, adns.Result, error)
LookupMX(ctx context.Context, name string) ([]*net.MX, adns.Result, error)
LookupNS(ctx context.Context, name string) ([]*net.NS, adns.Result, error)
LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, adns.Result, error)
LookupTXT(ctx context.Context, name string) ([]string, adns.Result, error)
LookupTLSA(ctx context.Context, port int, protocol, host string) ([]adns.TLSA, adns.Result, error)
} }
// WithPackage sets Pkg on resolver if it is a StrictResolver and does not have a package set yet. // WithPackage sets Pkg on resolver if it is a StrictResolver and does not have a package set yet.
@ -58,17 +63,8 @@ func WithPackage(resolver Resolver, name string) Resolver {
// StrictResolver is a net.Resolver that enforces that DNS names end with a dot, // StrictResolver is a net.Resolver that enforces that DNS names end with a dot,
// preventing "search"-relative lookups. // preventing "search"-relative lookups.
type StrictResolver struct { type StrictResolver struct {
Pkg string // Name of subsystem that is making DNS requests, for metrics. Pkg string // Name of subsystem that is making DNS requests, for metrics.
Resolver *adns.Resolver // Where the actual lookups are done. If nil, adns.DefaultResolver is used for lookups. Resolver *net.Resolver // Where the actual lookups are done. If nil, net.DefaultResolver is used for lookups.
Log *slog.Logger
}
func (r StrictResolver) log() mlog.Log {
pkg := r.Pkg
if pkg == "" {
pkg = "dns"
}
return mlog.New(pkg, r.Log)
} }
var _ Resolver = StrictResolver{} var _ Resolver = StrictResolver{}
@ -77,7 +73,7 @@ var ErrRelativeDNSName = errors.New("dns: host to lookup must be absolute, endin
func metricLookupObserve(pkg, typ string, err error, start time.Time) { func metricLookupObserve(pkg, typ string, err error, start time.Time) {
var result string var result string
var dnsErr *adns.DNSError var dnsErr *net.DNSError
switch { switch {
case err == nil: case err == nil:
result = "ok" result = "ok"
@ -92,7 +88,7 @@ func metricLookupObserve(pkg, typ string, err error, start time.Time) {
default: default:
result = "error" result = "error"
} }
MetricLookup.ObserveLabels(float64(time.Since(start))/float64(time.Second), pkg, typ, result) metricLookup.WithLabelValues(pkg, typ, result).Observe(float64(time.Since(start)) / float64(time.Second))
} }
func (r StrictResolver) WithPackage(name string) Resolver { func (r StrictResolver) WithPackage(name string) Resolver {
@ -103,91 +99,37 @@ func (r StrictResolver) WithPackage(name string) Resolver {
func (r StrictResolver) resolver() Resolver { func (r StrictResolver) resolver() Resolver {
if r.Resolver == nil { if r.Resolver == nil {
return adns.DefaultResolver return net.DefaultResolver
} }
return r.Resolver return r.Resolver
} }
func resolveErrorHint(err *error) { func (r StrictResolver) LookupAddr(ctx context.Context, addr string) (resp []string, err error) {
e := *err
if e == nil {
return
}
dnserr, ok := e.(*adns.DNSError)
if !ok {
return
}
// If the dns server is not running, and it is one of the default/fallback IPs,
// hint at where to look.
if dnserr.IsTemporary && runtime.GOOS == "linux" && (dnserr.Server == "127.0.0.1:53" || dnserr.Server == "[::1]:53") && strings.HasSuffix(dnserr.Err, "connection refused") {
*err = fmt.Errorf("%w (hint: does /etc/resolv.conf point to a running nameserver? in case of systemd-resolved, see systemd-resolved.service(8); better yet, install a proper dnssec-verifying recursive resolver like unbound)", *err)
}
}
func (r StrictResolver) LookupPort(ctx context.Context, network, service string) (resp int, err error) {
start := time.Now()
defer func() {
metricLookupObserve(r.Pkg, "port", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err,
slog.String("type", "port"),
slog.String("network", network),
slog.String("service", service),
slog.Int("resp", resp),
slog.Duration("duration", time.Since(start)),
)
}()
defer resolveErrorHint(&err)
resp, err = r.resolver().LookupPort(ctx, network, service)
return
}
func (r StrictResolver) LookupAddr(ctx context.Context, addr string) (resp []string, result adns.Result, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "addr", err, start) metricLookupObserve(r.Pkg, "addr", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "addr"), mlog.Field("addr", addr), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "addr"),
slog.String("addr", addr),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
resp, result, err = r.resolver().LookupAddr(ctx, addr) resp, err = r.resolver().LookupAddr(ctx, addr)
// For addresses from /etc/hosts without dot, we add the missing trailing dot.
for i, s := range resp {
if !strings.HasSuffix(s, ".") {
resp[i] = s + "."
}
}
return return
} }
// LookupCNAME looks up a CNAME. Unlike "net" LookupCNAME, it returns a "not found" // LookupCNAME looks up a CNAME. Unlike "net" LookupCNAME, it returns a "not found"
// error if there is no CNAME record. // error if there is no CNAME record.
func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp string, result adns.Result, err error) { func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp string, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "cname", err, start) metricLookupObserve(r.Pkg, "cname", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "cname"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "cname"),
slog.String("host", host),
slog.String("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(host, ".") { if !strings.HasSuffix(host, ".") {
return "", result, ErrRelativeDNSName return "", ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupCNAME(ctx, host) resp, err = r.resolver().LookupCNAME(ctx, host)
if err == nil && resp == host { if err == nil && resp == host {
return "", result, &adns.DNSError{ return "", &net.DNSError{
Err: "no cname record", Err: "no cname record",
Name: host, Name: host,
Server: "", Server: "",
@ -196,177 +138,111 @@ func (r StrictResolver) LookupCNAME(ctx context.Context, host string) (resp stri
} }
return return
} }
func (r StrictResolver) LookupHost(ctx context.Context, host string) (resp []string, err error) {
func (r StrictResolver) LookupHost(ctx context.Context, host string) (resp []string, result adns.Result, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "host", err, start) metricLookupObserve(r.Pkg, "host", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "host"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "host"),
slog.String("host", host),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(host, ".") { if !strings.HasSuffix(host, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupHost(ctx, host) resp, err = r.resolver().LookupHost(ctx, host)
return return
} }
func (r StrictResolver) LookupIP(ctx context.Context, network, host string) (resp []net.IP, result adns.Result, err error) { func (r StrictResolver) LookupIP(ctx context.Context, network, host string) (resp []net.IP, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "ip", err, start) metricLookupObserve(r.Pkg, "ip", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ip"), mlog.Field("network", network), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "ip"),
slog.String("network", network),
slog.String("host", host),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(host, ".") { if !strings.HasSuffix(host, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupIP(ctx, network, host) resp, err = r.resolver().LookupIP(ctx, network, host)
return return
} }
func (r StrictResolver) LookupIPAddr(ctx context.Context, host string) (resp []net.IPAddr, result adns.Result, err error) { func (r StrictResolver) LookupIPAddr(ctx context.Context, host string) (resp []net.IPAddr, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "ipaddr", err, start) metricLookupObserve(r.Pkg, "ipaddr", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ipaddr"), mlog.Field("host", host), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "ipaddr"),
slog.String("host", host),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(host, ".") { if !strings.HasSuffix(host, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupIPAddr(ctx, host) resp, err = r.resolver().LookupIPAddr(ctx, host)
return return
} }
func (r StrictResolver) LookupMX(ctx context.Context, name string) (resp []*net.MX, result adns.Result, err error) { func (r StrictResolver) LookupMX(ctx context.Context, name string) (resp []*net.MX, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "mx", err, start) metricLookupObserve(r.Pkg, "mx", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "mx"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "mx"),
slog.String("name", name),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(name, ".") { if !strings.HasSuffix(name, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupMX(ctx, name) resp, err = r.resolver().LookupMX(ctx, name)
return return
} }
func (r StrictResolver) LookupNS(ctx context.Context, name string) (resp []*net.NS, result adns.Result, err error) { func (r StrictResolver) LookupNS(ctx context.Context, name string) (resp []*net.NS, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "ns", err, start) metricLookupObserve(r.Pkg, "ns", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "ns"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "ns"),
slog.String("name", name),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(name, ".") { if !strings.HasSuffix(name, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupNS(ctx, name) resp, err = r.resolver().LookupNS(ctx, name)
return return
} }
func (r StrictResolver) LookupSRV(ctx context.Context, service, proto, name string) (resp0 string, resp1 []*net.SRV, result adns.Result, err error) { func (r StrictResolver) LookupPort(ctx context.Context, network, service string) (resp int, err error) {
start := time.Now()
defer func() {
metricLookupObserve(r.Pkg, "port", err, start)
xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "port"), mlog.Field("network", network), mlog.Field("service", service), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
}()
resp, err = r.resolver().LookupPort(ctx, network, service)
return
}
func (r StrictResolver) LookupSRV(ctx context.Context, service, proto, name string) (resp0 string, resp1 []*net.SRV, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "srv", err, start) metricLookupObserve(r.Pkg, "srv", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "srv"), mlog.Field("service", service), mlog.Field("proto", proto), mlog.Field("name", name), mlog.Field("resp0", resp0), mlog.Field("resp1", resp1), mlog.Field("duration", time.Since(start)))
slog.String("type", "srv"),
slog.String("service", service),
slog.String("proto", proto),
slog.String("name", name),
slog.String("resp0", resp0),
slog.Any("resp1", resp1),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(name, ".") { if !strings.HasSuffix(name, ".") {
return "", nil, result, ErrRelativeDNSName return "", nil, ErrRelativeDNSName
} }
resp0, resp1, result, err = r.resolver().LookupSRV(ctx, service, proto, name) resp0, resp1, err = r.resolver().LookupSRV(ctx, service, proto, name)
return return
} }
func (r StrictResolver) LookupTXT(ctx context.Context, name string) (resp []string, result adns.Result, err error) { func (r StrictResolver) LookupTXT(ctx context.Context, name string) (resp []string, err error) {
start := time.Now() start := time.Now()
defer func() { defer func() {
metricLookupObserve(r.Pkg, "txt", err, start) metricLookupObserve(r.Pkg, "txt", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err, xlog.WithContext(ctx).Debugx("dns lookup result", err, mlog.Field("pkg", r.Pkg), mlog.Field("type", "txt"), mlog.Field("name", name), mlog.Field("resp", resp), mlog.Field("duration", time.Since(start)))
slog.String("type", "txt"),
slog.String("name", name),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}() }()
defer resolveErrorHint(&err)
if !strings.HasSuffix(name, ".") { if !strings.HasSuffix(name, ".") {
return nil, result, ErrRelativeDNSName return nil, ErrRelativeDNSName
} }
resp, result, err = r.resolver().LookupTXT(ctx, name) resp, err = r.resolver().LookupTXT(ctx, name)
return
}
func (r StrictResolver) LookupTLSA(ctx context.Context, port int, protocol, host string) (resp []adns.TLSA, result adns.Result, err error) {
start := time.Now()
defer func() {
metricLookupObserve(r.Pkg, "tlsa", err, start)
r.log().WithContext(ctx).Debugx("dns lookup result", err,
slog.String("type", "tlsa"),
slog.Int("port", port),
slog.String("protocol", protocol),
slog.String("host", host),
slog.Any("resp", resp),
slog.Bool("authentic", result.Authentic),
slog.Duration("duration", time.Since(start)),
)
}()
defer resolveErrorHint(&err)
if !strings.HasSuffix(host, ".") {
return nil, result, ErrRelativeDNSName
}
resp, result, err = r.resolver().LookupTLSA(ctx, port, protocol, host)
return return
} }
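
To illustrate the absolute-name requirement and DNSSEC result plumbing of StrictResolver (a sketch against the main-branch interface above; WithPackage only tags the metrics/logging package name):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/mjl-/mox/dns"
)

func main() {
	ctx := context.Background()
	resolver := dns.WithPackage(dns.StrictResolver{}, "example")

	// Relative names are rejected before any lookup happens.
	if _, _, err := resolver.LookupMX(ctx, "example.com"); err != nil {
		fmt.Println("relative name rejected:", err)
	}

	// Absolute names (with trailing dot) are resolved; adns.Result reports
	// whether the answer was DNSSEC-authentic.
	mxs, result, err := resolver.LookupMX(ctx, "example.com.")
	if err != nil {
		log.Fatalf("mx lookup: %v", err)
	}
	fmt.Println("authentic:", result.Authentic)
	for _, mx := range mxs {
		fmt.Println(mx.Pref, mx.Host)
	}
}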

View File

@ -1,39 +1,39 @@
// Package dnsbl implements DNS block lists (RFC 5782), for checking incoming messages from sources without reputation. // Package dnsbl implements DNS block lists (RFC 5782), for checking incoming messages from sources without reputation.
//
// A DNS block list contains IP addresses that should be blocked. The DNSBL is
// queried using DNS "A" lookups. The DNSBL starts at a "zone", e.g.
// "dnsbl.example". To look up whether an IP address is listed, a DNS name is
// composed: For 10.11.12.13, that name would be "13.12.11.10.dnsbl.example". If
// the lookup returns "record does not exist", the IP is not listed. If an IP
// address is returned, the IP is listed. If an IP is listed, an additional TXT
// lookup is done for more information about the block. IPv6 addresses are also
// looked up with an DNS "A" lookup of a name similar to an IPv4 address, but with
// 4-bit hexadecimal dot-separated characters, in reverse.
//
// The health of a DNSBL "zone" can be checked through a lookup of 127.0.0.1
// (must not be present) and 127.0.0.2 (must be present).
package dnsbl package dnsbl
import ( import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"log/slog"
"net" "net"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/stub"
) )
var xlog = mlog.New("dnsbl")
var ( var (
MetricLookup stub.HistogramVec = stub.HistogramVecIgnore{} metricLookup = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "mox_dnsbl_lookup_duration_seconds",
Help: "DNSBL lookup",
Buckets: []float64{0.001, 0.005, 0.01, 0.05, 0.100, 0.5, 1, 5, 10, 20},
},
[]string{
"zone",
"status",
},
)
) )
var ErrDNS = errors.New("dnsbl: dns error") // Temporary error. var ErrDNS = errors.New("dnsbl: dns error")
// Status is the result of a DNSBL lookup. // Status is the result of a DNSBL lookup.
type Status string type Status string
@ -45,17 +45,12 @@ var (
) )
// Lookup checks if "ip" occurs in the DNS block list "zone" (e.g. dnsbl.example.org). // Lookup checks if "ip" occurs in the DNS block list "zone" (e.g. dnsbl.example.org).
func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone dns.Domain, ip net.IP) (rstatus Status, rexplanation string, rerr error) { func Lookup(ctx context.Context, resolver dns.Resolver, zone dns.Domain, ip net.IP) (rstatus Status, rexplanation string, rerr error) {
log := mlog.New("dnsbl", elog) log := xlog.WithContext(ctx)
start := time.Now() start := time.Now()
defer func() { defer func() {
MetricLookup.ObserveLabels(float64(time.Since(start))/float64(time.Second), zone.Name(), string(rstatus)) metricLookup.WithLabelValues(zone.Name(), string(rstatus)).Observe(float64(time.Since(start)) / float64(time.Second))
log.Debugx("dnsbl lookup result", rerr, log.Debugx("dnsbl lookup result", rerr, mlog.Field("zone", zone), mlog.Field("ip", ip), mlog.Field("status", rstatus), mlog.Field("explanation", rexplanation), mlog.Field("duration", time.Since(start)))
slog.Any("zone", zone),
slog.Any("ip", ip),
slog.Any("status", rstatus),
slog.String("explanation", rexplanation),
slog.Duration("duration", time.Since(start)))
}() }()
b := &strings.Builder{} b := &strings.Builder{}
@ -87,18 +82,18 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone
addr := b.String() addr := b.String()
// ../rfc/5782:175 // ../rfc/5782:175
_, _, err := dns.WithPackage(resolver, "dnsbl").LookupIP(ctx, "ip4", addr) _, err := dns.WithPackage(resolver, "dnsbl").LookupIP(ctx, "ip4", addr)
if dns.IsNotFound(err) { if dns.IsNotFound(err) {
return StatusPass, "", nil return StatusPass, "", nil
} else if err != nil { } else if err != nil {
return StatusTemperr, "", fmt.Errorf("%w: %s", ErrDNS, err) return StatusTemperr, "", fmt.Errorf("%w: %s", ErrDNS, err)
} }
txts, _, err := dns.WithPackage(resolver, "dnsbl").LookupTXT(ctx, addr) txts, err := dns.WithPackage(resolver, "dnsbl").LookupTXT(ctx, addr)
if dns.IsNotFound(err) { if dns.IsNotFound(err) {
return StatusFail, "", nil return StatusFail, "", nil
} else if err != nil { } else if err != nil {
log.Debugx("looking up txt record from dnsbl", err, slog.String("addr", addr)) log.Debugx("looking up txt record from dnsbl", err, mlog.Field("addr", addr))
return StatusFail, "", nil return StatusFail, "", nil
} }
return StatusFail, strings.Join(txts, "; "), nil return StatusFail, strings.Join(txts, "; "), nil
@ -109,16 +104,16 @@ func Lookup(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone
// Users of a DNSBL should periodically check if the DNSBL is still operating // Users of a DNSBL should periodically check if the DNSBL is still operating
// properly. // properly.
// For temporary errors, ErrDNS is returned. // For temporary errors, ErrDNS is returned.
func CheckHealth(ctx context.Context, elog *slog.Logger, resolver dns.Resolver, zone dns.Domain) (rerr error) { func CheckHealth(ctx context.Context, resolver dns.Resolver, zone dns.Domain) (rerr error) {
log := mlog.New("dnsbl", elog) log := xlog.WithContext(ctx)
start := time.Now() start := time.Now()
defer func() { defer func() {
log.Debugx("dnsbl healthcheck result", rerr, slog.Any("zone", zone), slog.Duration("duration", time.Since(start))) log.Debugx("dnsbl healthcheck result", rerr, mlog.Field("zone", zone), mlog.Field("duration", time.Since(start)))
}() }()
// ../rfc/5782:355 // ../rfc/5782:355
status1, _, err1 := Lookup(ctx, log.Logger, resolver, zone, net.IPv4(127, 0, 0, 1)) status1, _, err1 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 1))
status2, _, err2 := Lookup(ctx, log.Logger, resolver, zone, net.IPv4(127, 0, 0, 2)) status2, _, err2 := Lookup(ctx, resolver, zone, net.IPv4(127, 0, 0, 2))
if status1 == StatusPass && status2 == StatusFail { if status1 == StatusPass && status2 == StatusFail {
return nil return nil
} else if status1 == StatusFail { } else if status1 == StatusFail {
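
To make the name-composition scheme from the package comment concrete, here is a small standalone sketch (illustration only, not an API of this package) that builds the IPv4 query name for a block-list zone:

package main

import (
	"fmt"
	"net"
	"strings"
)

// dnsblName composes the DNS name queried for an IPv4 address in a block
// list zone: the octets in reverse order, followed by the zone.
func dnsblName(ip net.IP, zone string) string {
	ip4 := ip.To4()
	labels := make([]string, 4)
	for i, b := range ip4 {
		labels[3-i] = fmt.Sprintf("%d", b)
	}
	return strings.Join(labels, ".") + "." + zone
}

func main() {
	// 10.11.12.13 in zone dnsbl.example -> 13.12.11.10.dnsbl.example
	fmt.Println(dnsblName(net.ParseIP("10.11.12.13"), "dnsbl.example"))
}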

View File

@ -6,12 +6,10 @@ import (
"testing" "testing"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog"
) )
func TestDNSBL(t *testing.T) { func TestDNSBL(t *testing.T) {
ctx := context.Background() ctx := context.Background()
log := mlog.New("dnsbl", nil)
resolver := dns.MockResolver{ resolver := dns.MockResolver{
A: map[string][]string{ A: map[string][]string{
@ -25,7 +23,7 @@ func TestDNSBL(t *testing.T) {
}, },
} }
if status, expl, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.1")); err != nil { if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.1")); err != nil {
t.Fatalf("lookup: %v", err) t.Fatalf("lookup: %v", err)
} else if status != StatusFail { } else if status != StatusFail {
t.Fatalf("lookup, got status %v, expected fail", status) t.Fatalf("lookup, got status %v, expected fail", status)
@ -33,7 +31,7 @@ func TestDNSBL(t *testing.T) {
t.Fatalf("lookup, got explanation %q", expl) t.Fatalf("lookup, got explanation %q", expl)
} }
if status, expl, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("2001:db8:1:2:3:4:567:89ab")); err != nil { if status, expl, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("2001:db8:1:2:3:4:567:89ab")); err != nil {
t.Fatalf("lookup: %v", err) t.Fatalf("lookup: %v", err)
} else if status != StatusFail { } else if status != StatusFail {
t.Fatalf("lookup, got status %v, expected fail", status) t.Fatalf("lookup, got status %v, expected fail", status)
@ -41,17 +39,17 @@ func TestDNSBL(t *testing.T) {
t.Fatalf("lookup, got explanation %q", expl) t.Fatalf("lookup, got explanation %q", expl)
} }
if status, _, err := Lookup(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.2")); err != nil { if status, _, err := Lookup(ctx, resolver, dns.Domain{ASCII: "example.com"}, net.ParseIP("10.0.0.2")); err != nil {
t.Fatalf("lookup: %v", err) t.Fatalf("lookup: %v", err)
} else if status != StatusPass { } else if status != StatusPass {
t.Fatalf("lookup, got status %v, expected pass", status) t.Fatalf("lookup, got status %v, expected pass", status)
} }
// ../rfc/5782:357 // ../rfc/5782:357
if err := CheckHealth(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.com"}); err != nil { if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.com"}); err != nil {
t.Fatalf("dnsbl not healthy: %v", err) t.Fatalf("dnsbl not healthy: %v", err)
} }
if err := CheckHealth(ctx, log.Logger, resolver, dns.Domain{ASCII: "example.org"}); err == nil { if err := CheckHealth(ctx, resolver, dns.Domain{ASCII: "example.org"}); err == nil {
t.Fatalf("bad dnsbl is healthy") t.Fatalf("bad dnsbl is healthy")
} }
@ -60,7 +58,7 @@ func TestDNSBL(t *testing.T) {
"1.0.0.127.example.com.": {"127.0.0.2"}, // Should not be present in healthy dnsbl. "1.0.0.127.example.com.": {"127.0.0.2"}, // Should not be present in healthy dnsbl.
}, },
} }
if err := CheckHealth(ctx, log.Logger, unhealthyResolver, dns.Domain{ASCII: "example.com"}); err == nil { if err := CheckHealth(ctx, unhealthyResolver, dns.Domain{ASCII: "example.com"}); err == nil {
t.Fatalf("bad dnsbl is healthy") t.Fatalf("bad dnsbl is healthy")
} }
} }

View File

@ -1,30 +0,0 @@
package dnsbl_test
import (
"context"
"log"
"log/slog"
"net"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/dnsbl"
)
func ExampleLookup() {
ctx := context.Background()
resolver := dns.StrictResolver{}
// Lookup if ip 127.0.0.2 is in spamhaus blocklist at zone sbl.spamhaus.org.
status, explanation, err := dnsbl.Lookup(ctx, slog.Default(), resolver, dns.Domain{ASCII: "sbl.spamhaus.org"}, net.ParseIP("127.0.0.2"))
if err != nil {
log.Fatalf("dnsbl lookup: %v", err)
}
switch status {
case dnsbl.StatusTemperr:
log.Printf("dnsbl lookup, temporary dns error: %v", err)
case dnsbl.StatusPass:
log.Printf("dnsbl lookup, ip not listed")
case dnsbl.StatusFail:
log.Printf("dnsbl lookup, ip listed: %s", explanation)
}
}

doc.go (1182 changed lines)

File diff suppressed because it is too large.

View File

@ -1,15 +1,16 @@
version: '3.7'
services: services:
mox: mox:
build: build:
context: . context: .
dockerfile: Dockerfile.moximaptest dockerfile: Dockerfile.moximaptest
volumes: volumes:
- ./testdata/imaptest/config:/mox/config:z - ./testdata/imaptest/config:/mox/config
- ./testdata/imaptest/data:/mox/data:z - ./testdata/imaptest/data:/mox/data
- ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox:z - ./testdata/imaptest/imaptest.mbox:/mox/imaptest.mbox
working_dir: /mox working_dir: /mox
tty: true # For job control with set -m. tty: true # For job control with set -m.
command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl; fg' command: sh -c 'set -m; mox serve & sleep 1; echo testtest | mox setaccountpassword mjl@mox.example; fg'
healthcheck: healthcheck:
test: netstat -nlt | grep ':1143 ' test: netstat -nlt | grep ':1143 '
interval: 1s interval: 1s
@ -23,7 +24,7 @@ services:
command: host=mox port=1143 'user=mjl@mox.example' pass=testtest mbox=/imaptest/imaptest.mbox command: host=mox port=1143 'user=mjl@mox.example' pass=testtest mbox=/imaptest/imaptest.mbox
working_dir: /imaptest working_dir: /imaptest
volumes: volumes:
- ./testdata/imaptest:/imaptest:z - ./testdata/imaptest:/imaptest
depends_on: depends_on:
mox: mox:
condition: service_healthy condition: service_healthy

View File

@ -1,47 +1,18 @@
version: '3.7'
services: services:
# We run integration_test.go from this container, it connects to the other mox instances. moxmail:
test: # todo: understand why hostname and/or domainname don't have any influence on the reverse dns set up by docker, requiring us to use our own /etc/resolv.conf...
hostname: test.mox1.example hostname: moxmail1.mox1.example
image: mox_integration_test domainname: mox1.example
# We add our cfssl-generated CA (which is in the repo) and acme pebble CA build:
# (generated each time pebble starts) to the list of trusted CA's, so the TLS dockerfile: Dockerfile.moxmail
# dials in integration_test.go succeed. context: testdata/integration
command: ["sh", "-c", "set -ex; cat /integration/tmp-pebble-ca.pem /integration/tls/ca.pem >>/etc/ssl/certs/ca-certificates.crt; go test -tags integration"]
volumes: volumes:
- ./.go:/.go:z - ./.go:/.go
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z - ./testdata/integration/resolv.conf:/etc/resolv.conf
- ./testdata/integration:/integration:z - .:/mox
- ./testdata/integration/moxsubmit.conf:/etc/moxsubmit.conf:z
- .:/mox:z
environment: environment:
GOCACHE: /.go/.cache/go-build GOCACHE: /.go/.cache/go-build
depends_on:
dns:
condition: service_healthy
# moxmail2 depends on moxacmepebble, we connect to both.
moxmail2:
condition: service_healthy
postfixmail:
condition: service_healthy
localserve:
condition: service_healthy
moxacmepebblealpn:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.50
# First mox instance that uses ACME with pebble.
moxacmepebble:
hostname: moxacmepebble.mox1.example
domainname: mox1.example
image: mox_integration_moxmail
environment:
MOX_UID: "${MOX_UID}"
command: ["sh", "-c", "/integration/moxacmepebble.sh"]
volumes:
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
- ./testdata/integration:/integration:z
healthcheck: healthcheck:
test: netstat -nlt | grep ':25 ' test: netstat -nlt | grep ':25 '
interval: 1s interval: 1s
@ -50,87 +21,15 @@ services:
depends_on: depends_on:
dns: dns:
condition: service_healthy condition: service_healthy
acmepebble: postfixmail:
condition: service_healthy condition: service_healthy
networks: networks:
mailnet1: mailnet1:
ipv4_address: 172.28.1.10 ipv4_address: 172.28.1.10
mailnet2:
# Second mox instance, with TLS cert/keys from files. ipv4_address: 172.28.2.10
moxmail2: mailnet3:
hostname: moxmail2.mox2.example ipv4_address: 172.28.3.10
domainname: mox2.example
image: mox_integration_moxmail
environment:
MOX_UID: "${MOX_UID}"
command: ["sh", "-c", "/integration/moxmail2.sh"]
volumes:
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
- ./testdata/integration:/integration:z
healthcheck:
test: netstat -nlt | grep ':25 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
acmepebble:
condition: service_healthy
# moxacmepebble creates tmp-pebble-ca.pem, needed by moxmail2 to trust the certificates offered by moxacmepebble.
moxacmepebble:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.20
# Third mox instance that uses ACME with pebble and has ALPN enabled.
moxacmepebblealpn:
hostname: moxacmepebblealpn.mox1.example
domainname: mox1.example
image: mox_integration_moxmail
environment:
MOX_UID: "${MOX_UID}"
command: ["sh", "-c", "/integration/moxacmepebblealpn.sh"]
volumes:
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
- ./testdata/integration:/integration:z
healthcheck:
test: netstat -nlt | grep ':25 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
acmepebble:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.80
localserve:
hostname: localserve.mox1.example
domainname: mox1.example
image: mox_integration_moxmail
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; mox -checkconsistency localserve -ip 172.28.1.60"]
volumes:
- ./.go:/.go:z
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
- .:/mox:z
environment:
GOCACHE: /.go/.cache/go-build
healthcheck:
test: netstat -nlt | grep ':1025 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.60
postfixmail: postfixmail:
hostname: postfixmail.postfix.example hostname: postfixmail.postfix.example
@ -140,8 +39,8 @@ services:
context: testdata/integration context: testdata/integration
volumes: volumes:
# todo: figure out how to mount files with a uid that the process in the container can read... # todo: figure out how to mount files with a uid that the process in the container can read...
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z - ./testdata/integration/resolv.conf:/etc/resolv.conf
command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain'; echo 'smtp_tls_security_level = may') >>/etc/postfix/main.cf; echo 'root: postfix@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"] command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; (echo 'maillog_file = /dev/stdout'; echo 'mydestination = $$myhostname, localhost.$$mydomain, localhost, $$mydomain') >>/etc/postfix/main.cf; echo 'root: moxtest1@mox1.example' >>/etc/postfix/aliases; newaliases; postfix start-fg"]
healthcheck: healthcheck:
test: netstat -nlt | grep ':25 ' test: netstat -nlt | grep ':25 '
interval: 1s interval: 1s
@ -152,7 +51,7 @@ services:
condition: service_healthy condition: service_healthy
networks: networks:
mailnet1: mailnet1:
ipv4_address: 172.28.1.70 ipv4_address: 172.28.1.20
dns: dns:
hostname: dns.example hostname: dns.example
@ -161,11 +60,9 @@ services:
# todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system. # todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system.
context: testdata/integration context: testdata/integration
volumes: volumes:
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z - ./testdata/integration/resolv.conf:/etc/resolv.conf
- ./testdata/integration:/integration:z - ./testdata/integration:/integration
# We start with a base example.zone, but moxacmepebble appends its records, command: ["sh", "-c", "set -e; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /integration/*.zone /etc/unbound/; unbound -d -p -v"]
# followed by moxmail2. They restart unbound after appending records.
command: ["sh", "-c", "set -ex; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /integration/unbound.conf /etc/unbound/; chmod 755 /integration; chmod 644 /integration/*.zone; cp /integration/example.zone /integration/example-integration.zone; ls -ld /integration /integration/reverse.zone; unbound -d -p -v"]
healthcheck: healthcheck:
test: netstat -nlu | grep '172.28.1.30:53 ' test: netstat -nlu | grep '172.28.1.30:53 '
interval: 1s interval: 1s
@ -175,31 +72,6 @@ services:
mailnet1: mailnet1:
ipv4_address: 172.28.1.30 ipv4_address: 172.28.1.30
# pebble is a small acme server useful for testing. It creates a new CA
# certificate each time it starts, so we go through some trouble to configure the
# certificate in moxacmepebble and moxmail2.
acmepebble:
hostname: acmepebble.example
image: docker.io/letsencrypt/pebble:v2.3.1@sha256:fc5a537bf8fbc7cc63aa24ec3142283aa9b6ba54529f86eb8ff31fbde7c5b258
volumes:
- ./testdata/integration/resolv.conf:/etc/resolv.conf:z
- ./testdata/integration:/integration:z
command: ["sh", "-c", "set -ex; mount; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; pebble -config /integration/pebble-config.json"]
ports:
- 14000:14000 # ACME port
- 15000:15000 # Management port
healthcheck:
test: netstat -nlt | grep ':14000 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.40
networks: networks:
mailnet1: mailnet1:
driver: bridge driver: bridge
@ -207,3 +79,15 @@ networks:
driver: default driver: default
config: config:
- subnet: "172.28.1.0/24" - subnet: "172.28.1.0/24"
mailnet2:
driver: bridge
ipam:
driver: default
config:
- subnet: "172.28.2.0/24"
mailnet3:
driver: bridge
ipam:
driver: default
config:
- subnet: "172.28.3.0/24"


@ -0,0 +1,133 @@
version: '3.7'
services:
# We run quickstart_test.go from this container, it connects to both mox instances.
test:
hostname: test.mox1.example
image: mox_quickstart_test
# We add our cfssl-generated CA (which is in the repo) and acme pebble CA
# (generated each time pebble starts) to the list of trusted CA's, so the TLS
# dials in quickstart_test.go succeed.
command: ["sh", "-c", "set -ex; cat /quickstart/tmp-pebble-ca.pem /quickstart/tls/ca.pem >>/etc/ssl/certs/ca-certificates.crt; go test -tags quickstart"]
volumes:
- ./.go:/.go
- ./testdata/quickstart/resolv.conf:/etc/resolv.conf
- ./testdata/quickstart:/quickstart
- .:/mox
environment:
GOCACHE: /.go/.cache/go-build
depends_on:
dns:
condition: service_healthy
# moxmail2 depends on moxacmepebble, we connect to both.
moxmail2:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.50
# First mox instance that uses ACME with pebble.
moxacmepebble:
hostname: moxacmepebble.mox1.example
domainname: mox1.example
image: mox_quickstart_moxmail
environment:
MOX_UID: "${MOX_UID}"
command: ["sh", "-c", "/quickstart/moxacmepebble.sh"]
volumes:
- ./testdata/quickstart/resolv.conf:/etc/resolv.conf
- ./testdata/quickstart:/quickstart
healthcheck:
test: netstat -nlt | grep ':25 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
acmepebble:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.10
# Second mox instance, with TLS cert/keys from files.
moxmail2:
hostname: moxmail2.mox2.example
domainname: mox2.example
image: mox_quickstart_moxmail
environment:
MOX_UID: "${MOX_UID}"
command: ["sh", "-c", "/quickstart/moxmail2.sh"]
volumes:
- ./testdata/quickstart/resolv.conf:/etc/resolv.conf
- ./testdata/quickstart:/quickstart
healthcheck:
test: netstat -nlt | grep ':25 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
acmepebble:
condition: service_healthy
# moxacmepebble creates tmp-pebble-ca.pem, needed by moxmail2 to trust the certificates offered by moxacmepebble.
moxacmepebble:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.20
dns:
hostname: dns.example
build:
dockerfile: Dockerfile.dns
# todo: figure out how to build from dockerfile with empty context without creating empty dirs in file system.
context: testdata/quickstart
volumes:
- ./testdata/quickstart/resolv.conf:/etc/resolv.conf
- ./testdata/quickstart:/quickstart
# We start with a base example.zone, but moxacmepebble appends its records,
# followed by moxmail2. They restart unbound after appending records.
command: ["sh", "-c", "set -ex; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; install -m 640 -o unbound /quickstart/unbound.conf /etc/unbound/; chmod 755 /quickstart; chmod 644 /quickstart/*.zone; cp /quickstart/example.zone /quickstart/example-quickstart.zone; ls -ld /quickstart /quickstart/reverse.zone; unbound -d -p -v"]
healthcheck:
test: netstat -nlu | grep '172.28.1.30:53 '
interval: 1s
timeout: 1s
retries: 10
networks:
mailnet1:
ipv4_address: 172.28.1.30
# pebble is a small acme server useful for testing. It creates a new CA
# certificate each time it starts, so we go through some trouble to configure the
# certificate in moxacmepebble and moxmail2.
acmepebble:
hostname: acmepebble.example
image: docker.io/letsencrypt/pebble:v2.3.1@sha256:fc5a537bf8fbc7cc63aa24ec3142283aa9b6ba54529f86eb8ff31fbde7c5b258
volumes:
- ./testdata/quickstart/resolv.conf:/etc/resolv.conf
- ./testdata/quickstart:/quickstart
command: ["sh", "-c", "set -ex; mount; ls -l /etc/resolv.conf; chmod o+r /etc/resolv.conf; pebble -config /quickstart/pebble-config.json"]
ports:
- 14000:14000 # ACME port
- 15000:15000 # Management port
healthcheck:
test: netstat -nlt | grep ':14000 '
interval: 1s
timeout: 1s
retries: 10
depends_on:
dns:
condition: service_healthy
networks:
mailnet1:
ipv4_address: 172.28.1.40
networks:
mailnet1:
driver: bridge
ipam:
driver: default
config:
- subnet: "172.28.1.0/24"


@ -10,23 +10,8 @@
# After following the quickstart instructions you can start mox: # After following the quickstart instructions you can start mox:
# #
# docker-compose up # docker-compose up
#
#
# If you want to run "mox localserve", you could start it like this:
#
# docker run \
# -p 127.0.0.1:25:1025 \
# -p 127.0.0.1:465:1465 \
# -p 127.0.0.1:587:1587 \
# -p 127.0.0.1:993:1993 \
# -p 127.0.0.1:143:1143 \
# -p 127.0.0.1:443:1443 \
# -p 127.0.0.1:80:1080 \
# r.xmox.nl/mox:latest mox localserve -ip 0.0.0.0
#
# The -ip flag ensures connections to the published ports make it to mox, and it
# prevents listening on ::1 (IPv6 is not enabled in docker by default).
version: '3.7'
services: services:
mox: mox:
# Replace "latest" with the version you want to run, see https://r.xmox.nl/r/mox/. # Replace "latest" with the version you want to run, see https://r.xmox.nl/r/mox/.
@ -38,11 +23,11 @@ services:
# machine, and the IPs of incoming connections for spam filtering. # machine, and the IPs of incoming connections for spam filtering.
network_mode: 'host' network_mode: 'host'
volumes: volumes:
- ./config:/mox/config:z - ./config:/mox/config
- ./data:/mox/data:z - ./data:/mox/data
# web is optional but recommended to bind in, useful for serving static files with # web is optional but recommended to bind in, useful for serving static files with
# the webserver. # the webserver.
- ./web:/mox/web:z - ./web:/mox/web
working_dir: /mox working_dir: /mox
restart: on-failure restart: on-failure
healthcheck: healthcheck:


@ -5,17 +5,21 @@ package dsn
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"mime/multipart" "mime/multipart"
"net/textproto" "net/textproto"
"strconv"
"strings" "strings"
"time" "time"
"github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
) )
@ -41,18 +45,6 @@ type Message struct {
// Message subject header, e.g. describing mail delivery failure. // Message subject header, e.g. describing mail delivery failure.
Subject string Subject string
MessageID string
// References header, with Message-ID of original message this DSN is about. So
// mail user-agents will thread the DSN with the original message.
References string
// For message submitted with FUTURERELEASE SMTP extension. Value is either "for;"
// plus original interval in seconds or "until;" plus original UTC RFC3339
// date-time.
FutureReleaseRequest string
// ../rfc/4865:315
// Human-readable text explaining the failure. Line endings should be // Human-readable text explaining the failure. Line endings should be
// bare newlines, not \r\n. They are converted to \r\n when composing. // bare newlines, not \r\n. They are converted to \r\n when composing.
TextBody string TextBody string
@ -99,10 +91,9 @@ type Recipient struct {
Action Action Action Action
// Enhanced status code. First digit indicates permanent or temporary // Enhanced status code. First digit indicates permanent or temporary
// error. // error. If the string contains more than just a status, that
// additional text is added as comment when composing a DSN.
Status string Status string
// For additional details, included in comment.
StatusComment string
// Optional fields. // Optional fields.
// Original intended recipient of message. Used with the DSN extensions ORCPT // Original intended recipient of message. Used with the DSN extensions ORCPT
@ -114,10 +105,10 @@ type Recipient struct {
// deliveries. // deliveries.
RemoteMTA NameIP RemoteMTA NameIP
// DiagnosticCodeSMTP are the full SMTP response lines, space separated. The marshaled // If RemoteMTA is present, DiagnosticCode is from remote. When
// form starts with "smtp; ", this value does not. // creating a DSN, additional text in the string will be added to the
DiagnosticCodeSMTP string // DSN as comment.
DiagnosticCode string
LastAttemptDate time.Time LastAttemptDate time.Time
FinalLogID string FinalLogID string
@ -135,8 +126,8 @@ type Recipient struct {
// supports smtputf8. This influences the message media (sub)types used for the // supports smtputf8. This influences the message media (sub)types used for the
// DSN. // DSN.
// //
// Caller may want to add DKIM-Signature headers. // DKIM signatures are added if DKIM signing is configured for the "from" domain.
func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) { func (m *Message) Compose(log *mlog.Log, smtputf8 bool) ([]byte, error) {
// ../rfc/3462:119 // ../rfc/3462:119
// ../rfc/3464:377 // ../rfc/3464:377
// We'll make a multipart/report with 2 or 3 parts: // We'll make a multipart/report with 2 or 3 parts:
@ -167,13 +158,7 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
header("From", fmt.Sprintf("<%s>", m.From.XString(smtputf8))) // todo: would be good to have a local ascii-only name for this address. header("From", fmt.Sprintf("<%s>", m.From.XString(smtputf8))) // todo: would be good to have a local ascii-only name for this address.
header("To", fmt.Sprintf("<%s>", m.To.XString(smtputf8))) // todo: we could just leave this out if it has utf-8 and remote does not support utf-8. header("To", fmt.Sprintf("<%s>", m.To.XString(smtputf8))) // todo: we could just leave this out if it has utf-8 and remote does not support utf-8.
header("Subject", m.Subject) header("Subject", m.Subject)
if m.MessageID == "" { header("Message-Id", fmt.Sprintf("<%s>", mox.MessageIDGen(smtputf8)))
return nil, fmt.Errorf("missing message-id")
}
header("Message-Id", fmt.Sprintf("<%s>", m.MessageID))
if m.References != "" {
header("References", m.References)
}
header("Date", time.Now().Format(message.RFC5322Z)) header("Date", time.Now().Format(message.RFC5322Z))
header("MIME-Version", "1.0") header("MIME-Version", "1.0")
mp := multipart.NewWriter(msgw) mp := multipart.NewWriter(msgw)
@ -236,10 +221,6 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
status("Received-From-MTA", fmt.Sprintf("dns;%s (%s)", m.ReceivedFromMTA.Name, smtp.AddressLiteral(m.ReceivedFromMTA.ConnIP))) status("Received-From-MTA", fmt.Sprintf("dns;%s (%s)", m.ReceivedFromMTA.Name, smtp.AddressLiteral(m.ReceivedFromMTA.ConnIP)))
} }
status("Arrival-Date", m.ArrivalDate.Format(message.RFC5322Z)) // ../rfc/3464:758 status("Arrival-Date", m.ArrivalDate.Format(message.RFC5322Z)) // ../rfc/3464:758
if m.FutureReleaseRequest != "" {
// ../rfc/4865:320
status("Future-Release-Request", m.FutureReleaseRequest)
}
// Then per-recipient fields. ../rfc/3464:769 // Then per-recipient fields. ../rfc/3464:769
// todo: should also handle other address types. at least recognize "unknown". Probably just store this field. ../rfc/3464:819 // todo: should also handle other address types. at least recognize "unknown". Probably just store this field. ../rfc/3464:819
@ -272,23 +253,26 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
st = "2.0.0" st = "2.0.0"
} }
} }
var rest string
st, rest = codeLine(st)
statusLine := st statusLine := st
if r.StatusComment != "" { if rest != "" {
statusLine += " (" + r.StatusComment + ")" statusLine += " (" + rest + ")"
} }
status("Status", statusLine) // ../rfc/3464:975 status("Status", statusLine) // ../rfc/3464:975
if !r.RemoteMTA.IsZero() { if !r.RemoteMTA.IsZero() {
// ../rfc/3464:1015 // ../rfc/3464:1015
s := "dns;" + r.RemoteMTA.Name status("Remote-MTA", fmt.Sprintf("dns;%s (%s)", r.RemoteMTA.Name, smtp.AddressLiteral(r.RemoteMTA.IP)))
if len(r.RemoteMTA.IP) > 0 {
s += " (" + smtp.AddressLiteral(r.RemoteMTA.IP) + ")"
}
status("Remote-MTA", s)
} }
// Presence of Diagnostic-Code indicates the code is from Remote-MTA. ../rfc/3464:1053 // Presence of Diagnostic-Code indicates the code is from Remote-MTA. ../rfc/3464:1053
if r.DiagnosticCodeSMTP != "" { if r.DiagnosticCode != "" {
// ../rfc/3461:1342 ../rfc/6533:589 diagCode, rest := codeLine(r.DiagnosticCode)
status("Diagnostic-Code", "smtp; "+r.DiagnosticCodeSMTP) diagLine := diagCode
if rest != "" {
diagLine += " (" + rest + ")"
}
// ../rfc/6533:589
status("Diagnostic-Code", "smtp; "+diagLine)
} }
if !r.LastAttemptDate.IsZero() { if !r.LastAttemptDate.IsZero() {
status("Last-Attempt-Date", r.LastAttemptDate.Format(message.RFC5322Z)) // ../rfc/3464:1076 status("Last-Attempt-Date", r.LastAttemptDate.Format(message.RFC5322Z)) // ../rfc/3464:1076
@ -311,8 +295,10 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
headers = m.Original headers = m.Original
} else if err != nil { } else if err != nil {
return nil, err return nil, err
} else {
// This is a whole message. We still only include the headers.
// todo: include the whole body.
} }
// Else, this is a whole message. We still only include the headers. todo: include the whole body.
origHdr := textproto.MIMEHeader{} origHdr := textproto.MIMEHeader{}
if smtputf8 { if smtputf8 {
@ -340,7 +326,10 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
data := base64.StdEncoding.EncodeToString(headers) data := base64.StdEncoding.EncodeToString(headers)
for len(data) > 0 { for len(data) > 0 {
line := data line := data
n := min(len(line), 76) // ../rfc/2045:1372 n := len(line)
if n > 78 {
n = 78
}
line, data = data[:n], data[n:] line, data = data[:n], data[n:]
if _, err := origp.Write([]byte(line + "\r\n")); err != nil { if _, err := origp.Write([]byte(line + "\r\n")); err != nil {
return nil, err return nil, err
@ -362,6 +351,17 @@ func (m *Message) Compose(log mlog.Log, smtputf8 bool) ([]byte, error) {
} }
data := msgw.w.Bytes() data := msgw.w.Bytes()
fd := m.From.IPDomain.Domain
confDom, _ := mox.Conf.Domain(fd)
if len(confDom.DKIM.Sign) > 0 {
if dkimHeaders, err := dkim.Sign(context.Background(), m.From.Localpart, fd, confDom.DKIM, smtputf8, bytes.NewReader(data)); err != nil {
log.Errorx("dsn: dkim sign for domain, returning unsigned dsn", err, mlog.Field("domain", fd))
} else {
data = append([]byte(dkimHeaders), data...)
}
}
return data, nil return data, nil
} }
@ -378,3 +378,34 @@ func (w *errWriter) Write(buf []byte) (int, error) {
w.err = err w.err = err
return n, err return n, err
} }
// split a line into enhanced status code and rest.
func codeLine(s string) (string, string) {
t := strings.SplitN(s, " ", 2)
l := strings.Split(t[0], ".")
if len(l) != 3 {
return "", s
}
for i, e := range l {
_, err := strconv.ParseInt(e, 10, 32)
if err != nil {
return "", s
}
if i == 0 && len(e) != 1 {
return "", s
}
}
var rest string
if len(t) == 2 {
rest = t[1]
}
return t[0], rest
}
// HasCode returns whether line starts with an enhanced SMTP status code.
func HasCode(line string) bool {
// ../rfc/3464:986
ecode, _ := codeLine(line)
return ecode != ""
}
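For reference, this is the split codeLine and HasCode perform on typical Status values. A small illustrative helper, not part of the diff, that would have to live inside the dsn package since codeLine is unexported:

	package dsn

	import "fmt"

	// exampleCodeLine is a hypothetical helper showing the splitting behavior
	// described above; it is not part of either side of the diff.
	func exampleCodeLine() {
		ecode, rest := codeLine("5.1.1 no such user")
		fmt.Println(ecode, rest) // enhanced code "5.1.1", remaining text "no such user"

		ecode, rest = codeLine("mailbox full")
		fmt.Println(ecode, rest) // no enhanced code: "", "mailbox full"

		fmt.Println(HasCode("4.2.2 over quota")) // true
		fmt.Println(HasCode("over quota"))       // false
	}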


@ -2,6 +2,7 @@ package dsn
import ( import (
"bytes" "bytes"
"context"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -10,14 +11,14 @@ import (
"testing" "testing"
"time" "time"
"github.com/mjl-/mox/dkim"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
) )
var pkglog = mlog.New("dsn", nil)
func xparseDomain(s string) dns.Domain { func xparseDomain(s string) dns.Domain {
d, err := dns.ParseDomain(s) d, err := dns.ParseDomain(s)
if err != nil { if err != nil {
@ -32,7 +33,7 @@ func xparseIPDomain(s string) dns.IPDomain {
func tparseMessage(t *testing.T, data []byte, nparts int) (*Message, *message.Part) { func tparseMessage(t *testing.T, data []byte, nparts int) (*Message, *message.Part) {
t.Helper() t.Helper()
m, p, err := Parse(pkglog.Logger, bytes.NewReader(data)) m, p, err := Parse(bytes.NewReader(data))
if err != nil { if err != nil {
t.Fatalf("parsing dsn: %v", err) t.Fatalf("parsing dsn: %v", err)
} }
@ -50,8 +51,8 @@ func tcheckType(t *testing.T, p *message.Part, mt, mst, cte string) {
if !strings.EqualFold(p.MediaSubType, mst) { if !strings.EqualFold(p.MediaSubType, mst) {
t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst) t.Fatalf("got mediasubtype %q, expected %q", p.MediaSubType, mst)
} }
if !(cte == "" && p.ContentTransferEncoding == nil || cte != "" && p.ContentTransferEncoding != nil && strings.EqualFold(cte, *p.ContentTransferEncoding)) { if !strings.EqualFold(p.ContentTransferEncoding, cte) {
t.Fatalf("got content-transfer-encoding %v, expected %v", p.ContentTransferEncoding, cte) t.Fatalf("got content-transfer-encoding %q, expected %q", p.ContentTransferEncoding, cte)
} }
} }
@ -71,7 +72,7 @@ func tcompareReader(t *testing.T, r io.Reader, exp []byte) {
} }
func TestDSN(t *testing.T) { func TestDSN(t *testing.T) {
log := mlog.New("dsn", nil) log := mlog.New("dsn")
now := time.Now() now := time.Now()
@ -79,16 +80,14 @@ func TestDSN(t *testing.T) {
m := Message{ m := Message{
SMTPUTF8: false, SMTPUTF8: false,
From: smtp.Path{Localpart: "postmaster", IPDomain: xparseIPDomain("mox.example")}, From: smtp.Path{Localpart: "postmaster", IPDomain: xparseIPDomain("mox.example")},
To: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")}, To: smtp.Path{Localpart: "mjl", IPDomain: xparseIPDomain("remote.example")},
Subject: "dsn", Subject: "dsn",
MessageID: "test@localhost", TextBody: "delivery failure\n",
TextBody: "delivery failure\n",
ReportingMTA: "mox.example", ReportingMTA: "mox.example",
ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("relay.example"), ConnIP: net.ParseIP("10.10.10.10")}, ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("relay.example"), ConnIP: net.ParseIP("10.10.10.10")},
ArrivalDate: now, ArrivalDate: now,
FutureReleaseRequest: "for;123",
Recipients: []Recipient{ Recipients: []Recipient{
{ {
@ -105,7 +104,6 @@ func TestDSN(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("composing dsn: %v", err) t.Fatalf("composing dsn: %v", err)
} }
pmsg, part := tparseMessage(t, msgbuf, 3) pmsg, part := tparseMessage(t, msgbuf, 3)
tcheckType(t, part, "multipart", "report", "") tcheckType(t, part, "multipart", "report", "")
tcheckType(t, &part.Parts[0], "text", "plain", "7bit") tcheckType(t, &part.Parts[0], "text", "plain", "7bit")
@ -129,15 +127,35 @@ func TestDSN(t *testing.T) {
tcompareReader(t, part.Parts[2].Reader(), m.Original) tcompareReader(t, part.Parts[2].Reader(), m.Original)
tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient)
// Test for valid DKIM signature.
mox.Context = context.Background()
mox.ConfigStaticPath = "../testdata/dsn/mox.conf"
mox.MustLoadConfig(false)
msgbuf, err = m.Compose(log, false)
if err != nil {
t.Fatalf("composing utf-8 dsn with utf-8 support: %v", err)
}
resolver := &dns.MockResolver{
TXT: map[string][]string{
"testsel._domainkey.mox.example.": {"v=DKIM1;h=sha256;t=s;p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3ZId3ys70VFspp/VMFaxMOrNjHNPg04NOE1iShih16b3Ex7hHBOgC1UvTGSmrMlbCB1OxTXkvf6jW6S4oYRnZYVNygH6zKUwYYhaSaGIg1xA/fDn+IgcTRyLoXizMUgUgpTGyxhNrwIIWv+i7jjbs3TKpP3NU4owQ/rxowmSNqg+fHIF1likSvXvljYS" + "jaFXXnWfYibW7TdDCFFpN4sB5o13+as0u4vLw6MvOi59B1tLype1LcHpi1b9PfxNtznTTdet3kL0paxIcWtKHT0LDPUos8YYmiPa5nGbUqlC7d+4YT2jQPvwGxCws1oo2Tw6nj1UaihneYGAyvEky49FBwIDAQAB"},
},
}
results, err := dkim.Verify(context.Background(), resolver, false, func(*dkim.Sig) error { return nil }, bytes.NewReader(msgbuf), false)
if err != nil {
t.Fatalf("dkim verify: %v", err)
}
if len(results) != 1 || results[0].Status != dkim.StatusPass {
t.Fatalf("dkim result not pass, %#v", results)
}
// An utf-8 message. // An utf-8 message.
m = Message{ m = Message{
SMTPUTF8: true, SMTPUTF8: true,
From: smtp.Path{Localpart: "postmæster", IPDomain: xparseIPDomain("møx.example")}, From: smtp.Path{Localpart: "postmæster", IPDomain: xparseIPDomain("møx.example")},
To: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")}, To: smtp.Path{Localpart: "møx", IPDomain: xparseIPDomain("remøte.example")},
Subject: "dsn¡", Subject: "dsn¡",
MessageID: "test@localhost", TextBody: "delivery failure¿\n",
TextBody: "delivery failure¿\n",
ReportingMTA: "mox.example", ReportingMTA: "mox.example",
ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("reläy.example"), ConnIP: net.ParseIP("10.10.10.10")}, ReceivedFromMTA: smtp.Ehlo{Name: xparseIPDomain("reläy.example"), ConnIP: net.ParseIP("10.10.10.10")},
@ -192,3 +210,34 @@ func TestDSN(t *testing.T) {
tcheckType(t, &part.Parts[1], "message", "global-delivery-status", "8bit") tcheckType(t, &part.Parts[1], "message", "global-delivery-status", "8bit")
tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient) tcompare(t, pmsg.Recipients[0].FinalRecipient, m.Recipients[0].FinalRecipient)
} }
func TestCode(t *testing.T) {
testCodeLine := func(line, ecode, rest string) {
t.Helper()
e, r := codeLine(line)
if e != ecode || r != rest {
t.Fatalf("codeLine %q: got %q %q, expected %q %q", line, e, r, ecode, rest)
}
}
testCodeLine("4.0.0", "4.0.0", "")
testCodeLine("4.0.0 more", "4.0.0", "more")
testCodeLine("other", "", "other")
testCodeLine("other more", "", "other more")
testHasCode := func(line string, exp bool) {
t.Helper()
got := HasCode(line)
if got != exp {
t.Fatalf("HasCode %q: got %v, expected %v", line, got, exp)
}
}
testHasCode("4.0.0", true)
testHasCode("5.7.28", true)
testHasCode("10.0.0", false) // first number must be single digit.
testHasCode("4.1.1 more", true)
testHasCode("other ", false)
testHasCode("4.2.", false)
testHasCode("4.2. ", false)
testHasCode(" 4.2.4", false)
testHasCode(" 4.2.4 ", false)
}


@ -4,7 +4,6 @@ import (
"bufio" "bufio"
"fmt" "fmt"
"io" "io"
"log/slog"
"net/textproto" "net/textproto"
"strconv" "strconv"
"strings" "strings"
@ -12,9 +11,7 @@ import (
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
"slices"
) )
// Parse reads a DSN message. // Parse reads a DSN message.
@ -25,19 +22,17 @@ import (
// The first return value is the machine-parsed DSN message. The second value is // The first return value is the machine-parsed DSN message. The second value is
// the entire MIME multipart message. Use its Parts field to access the // the entire MIME multipart message. Use its Parts field to access the
// human-readable text and optional original message/headers. // human-readable text and optional original message/headers.
func Parse(elog *slog.Logger, r io.ReaderAt) (*Message, *message.Part, error) { func Parse(r io.ReaderAt) (*Message, *message.Part, error) {
log := mlog.New("dsn", elog)
// DSNs can mix and match subtypes with and without utf-8. ../rfc/6533:441 // DSNs can mix and match subtypes with and without utf-8. ../rfc/6533:441
part, err := message.Parse(log.Logger, false, r) part, err := message.Parse(r)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("parsing message: %v", err) return nil, nil, fmt.Errorf("parsing message: %v", err)
} }
if part.MediaType != "MULTIPART" || part.MediaSubType != "REPORT" { if part.MediaType != "MULTIPART" || part.MediaSubType != "REPORT" {
return nil, nil, fmt.Errorf(`message has content-type %q, must have "message/report"`, strings.ToLower(part.MediaType+"/"+part.MediaSubType)) return nil, nil, fmt.Errorf(`message has content-type %q, must have "message/report"`, strings.ToLower(part.MediaType+"/"+part.MediaSubType))
} }
err = part.Walk(log.Logger, nil) err = part.Walk(nil)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("parsing message parts: %v", err) return nil, nil, fmt.Errorf("parsing message parts: %v", err)
} }
@ -66,11 +61,7 @@ func Parse(elog *slog.Logger, r io.ReaderAt) (*Message, *message.Part, error) {
if err != nil { if err != nil {
return smtp.Path{}, fmt.Errorf("parsing domain: %v", err) return smtp.Path{}, fmt.Errorf("parsing domain: %v", err)
} }
lp, err := smtp.ParseLocalpart(a.User) return smtp.Path{Localpart: smtp.Localpart(a.User), IPDomain: dns.IPDomain{Domain: d}}, nil
if err != nil {
return smtp.Path{}, fmt.Errorf("parsing localpart: %v", err)
}
return smtp.Path{Localpart: lp, IPDomain: dns.IPDomain{Domain: d}}, nil
} }
if len(part.Envelope.From) == 1 { if len(part.Envelope.From) == 1 {
m.From, err = addressPath(part.Envelope.From[0]) m.From, err = addressPath(part.Envelope.From[0])
@ -85,7 +76,7 @@ func Parse(elog *slog.Logger, r io.ReaderAt) (*Message, *message.Part, error) {
} }
} }
m.Subject = part.Envelope.Subject m.Subject = part.Envelope.Subject
buf, err := io.ReadAll(p0.ReaderUTF8OrBinary()) buf, err := io.ReadAll(p0.Reader())
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("reading human-readable text part: %v", err) return nil, nil, fmt.Errorf("reading human-readable text part: %v", err)
} }
@ -218,21 +209,19 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
case "Action": case "Action":
a := Action(strings.ToLower(v)) a := Action(strings.ToLower(v))
actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded} actions := []Action{Failed, Delayed, Delivered, Relayed, Expanded}
if slices.Contains(actions, a) { var ok bool
r.Action = a for _, x := range actions {
} else { if a == x {
ok = true
break
}
}
if !ok {
err = fmt.Errorf("unrecognized action %q", v) err = fmt.Errorf("unrecognized action %q", v)
} }
case "Status": case "Status":
// todo: parse the enhanced status code? // todo: parse the enhanced status code?
r.Status = v r.Status = v
t := strings.SplitN(v, "(", 2)
v = strings.TrimSpace(v)
if len(t) == 2 && strings.HasSuffix(v, ")") {
r.Status = strings.TrimSpace(t[0])
r.StatusComment = strings.TrimSpace(strings.TrimSuffix(t[1], ")"))
}
case "Remote-Mta": case "Remote-Mta":
r.RemoteMTA = NameIP{Name: v} r.RemoteMTA = NameIP{Name: v}
case "Diagnostic-Code": case "Diagnostic-Code":
@ -244,7 +233,7 @@ func parseRecipientHeader(mr *textproto.Reader, utf8 bool) (Recipient, error) {
} else if len(t) != 2 { } else if len(t) != 2 {
err = fmt.Errorf("missing semicolon to separate diagnostic-type from code") err = fmt.Errorf("missing semicolon to separate diagnostic-type from code")
} else { } else {
r.DiagnosticCodeSMTP = strings.TrimSpace(t[1]) r.DiagnosticCode = strings.TrimSpace(t[1])
} }
case "Last-Attempt-Date": case "Last-Attempt-Date":
r.LastAttemptDate, err = parseDateTime(v) r.LastAttemptDate, err = parseDateTime(v)
@ -317,18 +306,17 @@ func parseAddress(s string, utf8 bool) (smtp.Path, error) {
} }
} }
// todo: more proper parser // todo: more proper parser
t = strings.Split(s, "@") t = strings.SplitN(s, "@", 2)
if len(t) == 1 { if len(t) != 2 || t[0] == "" || t[1] == "" {
return smtp.Path{}, fmt.Errorf("invalid email address") return smtp.Path{}, fmt.Errorf("invalid email address")
} }
d, err := dns.ParseDomain(t[len(t)-1]) d, err := dns.ParseDomain(t[1])
if err != nil { if err != nil {
return smtp.Path{}, fmt.Errorf("parsing domain: %v", err) return smtp.Path{}, fmt.Errorf("parsing domain: %v", err)
} }
var lp string var lp string
var esc string var esc string
lead := strings.Join(t[:len(t)-1], "@") for _, c := range t[0] {
for _, c := range lead {
if esc == "" && c == '\\' || esc == `\` && (c == 'x' || c == 'X') || esc == `\x` && c == '{' { if esc == "" && c == '\\' || esc == `\` && (c == 'x' || c == 'X') || esc == `\x` && c == '{' {
if c == 'X' { if c == 'X' {
c = 'x' c = 'x'
@ -352,11 +340,7 @@ func parseAddress(s string, utf8 bool) (smtp.Path, error) {
if esc != "" { if esc != "" {
return smtp.Path{}, fmt.Errorf("parsing localpart: unfinished embedded unicode char") return smtp.Path{}, fmt.Errorf("parsing localpart: unfinished embedded unicode char")
} }
localpart, err := smtp.ParseLocalpart(lp) p := smtp.Path{Localpart: smtp.Localpart(lp), IPDomain: dns.IPDomain{Domain: d}}
if err != nil {
return smtp.Path{}, fmt.Errorf("parsing localpart: %v", err)
}
p := smtp.Path{Localpart: localpart, IPDomain: dns.IPDomain{Domain: d}}
return p, nil return p, nil
} }


@ -1,325 +0,0 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"log"
"reflect"
"strings"
"time"
"github.com/mjl-/sconf"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/webhook"
)
func cmdExample(c *cmd) {
c.params = "[name]"
c.help = `List available examples, or print a specific example.`
args := c.Parse()
if len(args) > 1 {
c.Usage()
}
var match func() string
for _, ex := range examples {
if len(args) == 0 {
fmt.Println(ex.Name)
} else if args[0] == ex.Name {
match = ex.Get
}
}
if len(args) == 0 {
return
}
if match == nil {
log.Fatalln("not found")
}
fmt.Print(match())
}
func cmdConfigExample(c *cmd) {
c.params = "[name]"
c.help = `List available config examples, or print a specific example.`
args := c.Parse()
if len(args) > 1 {
c.Usage()
}
var match func() string
for _, ex := range configExamples {
if len(args) == 0 {
fmt.Println(ex.Name)
} else if args[0] == ex.Name {
match = ex.Get
}
}
if len(args) == 0 {
return
}
if match == nil {
log.Fatalln("not found")
}
fmt.Print(match())
}
var configExamples = []struct {
Name string
Get func() string
}{
{
"webhandlers",
func() string {
const webhandlers = `# Snippet of domains.conf to configure WebDomainRedirects and WebHandlers.
# Redirect all requests for mox.example to https://www.mox.example.
WebDomainRedirects:
mox.example: www.mox.example
# Each request is matched against these handlers until one matches and serves it.
WebHandlers:
-
# Redirect all plain http requests to https, leaving path, query strings, etc
# intact. When the request is already to https, the destination URL would have the
# same scheme, host and path, causing this redirect handler to not match the
# request (and not cause a redirect loop) and the webserver to serve the request
# with a later handler.
LogName: redirhttps
Domain: www.mox.example
PathRegexp: ^/
# Could leave DontRedirectPlainHTTP at false if it wasn't for this being an
# example for doing this redirect.
DontRedirectPlainHTTP: true
WebRedirect:
BaseURL: https://www.mox.example
-
# The name of the handler, used in logging and metrics.
LogName: staticmjl
# With ACME configured, each configured domain will automatically get a TLS
# certificate on first request.
Domain: www.mox.example
PathRegexp: ^/who/mjl/
WebStatic:
StripPrefix: /who/mjl
# Requested path /who/mjl/inferno/ resolves to local web/mjl/inferno.
# If a directory contains an index.html, it is served when a directory is requested.
Root: web/mjl
# With ListFiles true, if a directory does not contain an index.html, the contents are listed.
ListFiles: true
ResponseHeaders:
X-Mox: hi
-
LogName: redir
Domain: www.mox.example
PathRegexp: ^/redir/a/b/c
# Don't redirect from plain HTTP to HTTPS.
DontRedirectPlainHTTP: true
WebRedirect:
# Just change the domain and add query string set fragment. No change to scheme.
# Path will start with /redir/a/b/c (and whatever came after) because no
# OrigPathRegexp+ReplacePath is set.
BaseURL: //moxest.example?q=1#frag
# Default redirection is 308 - Permanent Redirect.
StatusCode: 307
-
LogName: oldnew
Domain: www.mox.example
PathRegexp: ^/old/
WebRedirect:
# Replace path, leaving rest of URL intact.
OrigPathRegexp: ^/old/(.*)
ReplacePath: /new/$1
-
LogName: app
Domain: www.mox.example
PathRegexp: ^/app/
WebForward:
# Strip the path matched by PathRegexp before forwarding the request. So original
# request /app/api become just /api.
StripPath: true
# URL of backend, where requests are forwarded to. The path in the URL is kept,
# so for incoming request URL /app/api, the outgoing request URL has path /app-v2/api.
# Requests are made with Go's net/http DefaultTransport, including using
# HTTP_PROXY and HTTPS_PROXY environment variables.
URL: http://127.0.0.1:8900/app-v2/
# Add headers to response.
ResponseHeaders:
X-Frame-Options: deny
X-Content-Type-Options: nosniff
`
// Parse just so we know we have the syntax right.
// todo: ideally we would have a complete config file and parse it fully.
var conf struct {
WebDomainRedirects map[string]string
WebHandlers []config.WebHandler
}
err := sconf.Parse(strings.NewReader(webhandlers), &conf)
xcheckf(err, "parsing webhandlers example")
return webhandlers
},
},
{
"transport",
func() string {
const moxconf = `# Snippet for mox.conf, defining a transport called Example that connects on the
# SMTP submission with TLS port 465 ("submissions"), authenticating with
# SCRAM-SHA-256-PLUS (other providers may not support SCRAM-SHA-256-PLUS, but they
# typically do support the older CRAM-MD5).:
# Transports are mechanisms for delivering messages. Transports can be referenced
# from Routes in accounts, domains and the global configuration. There is always
# an implicit/fallback delivery transport doing direct delivery with SMTP from the
# outgoing message queue. Transports are typically only configured when using
# smarthosts, i.e. when delivering through another SMTP server. Zero or one
# transport methods must be set in a transport, never multiple. When using an
# external party to send email for a domain, keep in mind you may have to add
# their IP address to your domain's SPF record, and possibly additional DKIM
# records. (optional)
Transports:
Example:
# Submission SMTP over a TLS connection to submit email to a remote queue.
# (optional)
Submissions:
# Host name to connect to and for verifying its TLS certificate.
Host: smtp.example.com
# If set, authentication credentials for the remote server. (optional)
Auth:
Username: user@example.com
Password: test1234
Mechanisms:
# Allowed authentication mechanisms. Defaults to SCRAM-SHA-256-PLUS,
# SCRAM-SHA-256, SCRAM-SHA-1-PLUS, SCRAM-SHA-1, CRAM-MD5. Not included by default:
# PLAIN. Specify the strongest mechanism known to be implemented by the server to
# prevent mechanism downgrade attacks. (optional)
- SCRAM-SHA-256-PLUS
`
const domainsconf = `# Snippet for domains.conf, specifying a route that sends through the transport:
# Routes for delivering outgoing messages through the queue. Each delivery attempt
# evaluates account routes, domain routes and finally these global routes. The
# transport of the first matching route is used in the delivery attempt. If no
# routes match, which is the default with no configured routes, messages are
# delivered directly from the queue. (optional)
Routes:
-
Transport: Example
`
var static struct {
Transports map[string]config.Transport
}
var dynamic struct {
Routes []config.Route
}
err := sconf.Parse(strings.NewReader(moxconf), &static)
xcheckf(err, "parsing moxconf example")
err = sconf.Parse(strings.NewReader(domainsconf), &dynamic)
xcheckf(err, "parsing domainsconf example")
return moxconf + "\n\n" + domainsconf
},
},
}
var exampleTime = time.Date(2024, time.March, 27, 0, 0, 0, 0, time.UTC)
var examples = []struct {
Name string
Get func() string
}{
{
"webhook-outgoing-delivered",
func() string {
v := webhook.Outgoing{
Version: 0,
Event: webhook.EventDelivered,
QueueMsgID: 101,
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Subject: "subject of original message",
WebhookQueued: exampleTime,
Extra: map[string]string{},
SMTPCode: smtp.C250Completed,
}
return "Example webhook HTTP POST JSON body for successful outgoing delivery:\n\n\t" + formatJSON(v)
},
},
{
"webhook-outgoing-dsn-failed",
func() string {
v := webhook.Outgoing{
Version: 0,
Event: webhook.EventFailed,
DSN: true,
Suppressing: true,
QueueMsgID: 102,
FromID: base64.RawURLEncoding.EncodeToString([]byte("0123456789abcdef")),
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Subject: "subject of original message",
WebhookQueued: exampleTime,
Extra: map[string]string{"userid": "456"},
Error: "timeout connecting to host",
SMTPCode: smtp.C554TransactionFailed,
SMTPEnhancedCode: "5." + smtp.SeNet4Other0,
}
return `Example webhook HTTP POST JSON body for failed delivery based on incoming DSN
message, with custom extra data fields (from original submission), and adding address to the suppression list:
` + formatJSON(v)
},
},
{
"webhook-incoming-basic",
func() string {
v := webhook.Incoming{
Version: 0,
From: []webhook.NameAddress{{Address: "mox@localhost"}},
To: []webhook.NameAddress{{Address: "mjl@localhost"}},
Subject: "hi",
MessageID: "<QnxzgulZK51utga6agH_rg@mox.example>",
Date: &exampleTime,
Text: "hello world ☺\n",
Structure: webhook.Structure{
ContentType: "text/plain",
ContentTypeParams: map[string]string{"charset": "utf-8"},
DecodedSize: int64(len("hello world ☺\r\n")),
Parts: []webhook.Structure{},
},
Meta: webhook.IncomingMeta{
MsgID: 201,
MailFrom: "mox@localhost",
MailFromValidated: false,
MsgFromValidated: true,
RcptTo: "mjl@localhost",
DKIMVerifiedDomains: []string{"localhost"},
RemoteIP: "127.0.0.1",
Received: exampleTime.Add(3 * time.Second),
MailboxName: "Inbox",
Automated: false,
},
}
return "Example JSON body for webhooks for incoming delivery of basic message:\n\n\t" + formatJSON(v)
},
},
}
func formatJSON(v any) string {
nv, _ := mox.FillNil(reflect.ValueOf(v))
v = nv.Interface()
var b bytes.Buffer
enc := json.NewEncoder(&b)
enc.SetIndent("\t", "\t")
enc.SetEscapeHTML(false)
err := enc.Encode(v)
xcheckf(err, "encoding to json")
return b.String()
}
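For context, a minimal sketch of an HTTP endpoint that could receive the outgoing-delivery webhook bodies shown above. The struct mirrors a subset of the JSON fields from the examples; the field selection, route and listen address are assumptions for this sketch:

	package main

	import (
		"encoding/json"
		"log"
		"net/http"
	)

	// outgoingEvent mirrors a few of the fields in the webhook.Outgoing examples
	// above; it is not the webhook package's own type.
	type outgoingEvent struct {
		Version    int
		Event      string
		QueueMsgID int64
		MessageID  string
		Subject    string
		Error      string
		SMTPCode   int
	}

	func main() {
		http.HandleFunc("/webhook/outgoing", func(w http.ResponseWriter, r *http.Request) {
			var ev outgoingEvent
			if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
				http.Error(w, "bad json", http.StatusBadRequest)
				return
			}
			log.Printf("queue msg %d: event %q, subject %q, error %q", ev.QueueMsgID, ev.Event, ev.Subject, ev.Error)
			w.WriteHeader(http.StatusNoContent)
		})
		log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
	}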


@ -8,26 +8,25 @@ import (
"github.com/mjl-/bstore" "github.com/mjl-/bstore"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/store" "github.com/mjl-/mox/store"
) )
func cmdExportMaildir(c *cmd) { func cmdExportMaildir(c *cmd) {
c.params = "[-single] dst-dir account-path [mailbox]" c.params = "dst-dir account-path [mailbox]"
c.help = `Export one or all mailboxes from an account in maildir format. c.help = `Export one or all mailboxes from an account in maildir format.
Export bypasses a running mox instance. It opens the account mailbox/message Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page or webmail. the accounts web page.
` `
var single bool
c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
args := c.Parse() args := c.Parse()
xcmdExport(false, single, args, c) xcmdExport(false, args, c)
} }
func cmdExportMbox(c *cmd) { func cmdExportMbox(c *cmd) {
c.params = "[-single] dst-dir account-path [mailbox]" c.params = "dst-dir account-path [mailbox]"
c.help = `Export messages from one or all mailboxes in an account in mbox format. c.help = `Export messages from one or all mailboxes in an account in mbox format.
Using mbox is not recommended. Maildir is a better format. Using mbox is not recommended. Maildir is a better format.
@ -35,19 +34,17 @@ Using mbox is not recommended. Maildir is a better format.
Export bypasses a running mox instance. It opens the account mailbox/message Export bypasses a running mox instance. It opens the account mailbox/message
database file directly. This may block if a running mox instance also has the database file directly. This may block if a running mox instance also has the
database open, e.g. for IMAP connections. To export from a running instance, use database open, e.g. for IMAP connections. To export from a running instance, use
the accounts web page or webmail. the accounts web page.
For mbox export, "mboxrd" is used where message lines starting with the magic For mbox export, "mboxrd" is used where message lines starting with the magic
"From " string are escaped by prepending a >. All ">*From " are escaped, "From " string are escaped by prepending a >. All ">*From " are escaped,
otherwise reconstructing the original could lose a ">". otherwise reconstructing the original could lose a ">".
` `
var single bool
c.flag.BoolVar(&single, "single", false, "export single mailbox, without any children. disabled if mailbox isn't specified.")
args := c.Parse() args := c.Parse()
xcmdExport(true, single, args, c) xcmdExport(true, args, c)
} }
func xcmdExport(mbox, single bool, args []string, c *cmd) { func xcmdExport(mbox bool, args []string, c *cmd) {
if len(args) != 2 && len(args) != 3 { if len(args) != 2 && len(args) != 3 {
c.Usage() c.Usage()
} }
@ -57,13 +54,10 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
var mailbox string var mailbox string
if len(args) == 3 { if len(args) == 3 {
mailbox = args[2] mailbox = args[2]
} else {
single = false
} }
dbpath := filepath.Join(accountDir, "index.db") dbpath := filepath.Join(accountDir, "index.db")
opts := bstore.Options{Timeout: 5 * time.Second, Perm: 0660, RegisterLogger: c.log.Logger} db, err := bstore.Open(context.Background(), dbpath, &bstore.Options{Timeout: 5 * time.Second, Perm: 0660}, store.DBTypes...)
db, err := bstore.Open(context.Background(), dbpath, &opts, store.DBTypes...)
xcheckf(err, "open database %q", dbpath) xcheckf(err, "open database %q", dbpath)
defer func() { defer func() {
if err := db.Close(); err != nil { if err := db.Close(); err != nil {
@ -72,7 +66,7 @@ func xcmdExport(mbox, single bool, args []string, c *cmd) {
}() }()
a := store.DirArchiver{Dir: dst} a := store.DirArchiver{Dir: dst}
err = store.ExportMessages(context.Background(), c.log, db, accountDir, a, !mbox, mailbox, nil, !single) err = store.ExportMessages(context.Background(), mlog.New("export"), db, accountDir, a, !mbox, mailbox)
xcheckf(err, "exporting messages") xcheckf(err, "exporting messages")
err = a.Close() err = a.Close()
xcheckf(err, "closing archiver") xcheckf(err, "closing archiver")
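A minimal sketch of the "mboxrd" From-line escaping rule described in the export help text above (illustrative only, not the exporter's actual code):

	package main

	import (
		"fmt"
		"strings"
	)

	// mboxrdEscape prepends ">" to any line that is "From " preceded by zero or
	// more ">" characters, so later unescaping can strip exactly one ">" without
	// losing information.
	func mboxrdEscape(line string) string {
		if strings.HasPrefix(strings.TrimLeft(line, ">"), "From ") {
			return ">" + line
		}
		return line
	}

	func main() {
		for _, line := range []string{"From here on, plain text", ">From an earlier quote", "unrelated line"} {
			fmt.Println(mboxrdEscape(line))
		}
		// Output:
		// >From here on, plain text
		// >>From an earlier quote
		// unrelated line
	}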


@ -1,10 +0,0 @@
#!/bin/sh
set -eu
# we rewrite some dmarcrpt and tlsrpt enums into untyped strings: real-world
# reports have invalid values, and our loose Go typed strings accept all values,
# but we don't want the typescript runtime checker to fail on those unrecognized
# values.
(cd webadmin && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none -rename 'config Domain ConfigDomain,dmarc Policy DMARCPolicy,mtasts MX STSMX,tlsrptdb Record TLSReportRecord,tlsrptdb SuppressAddress TLSRPTSuppressAddress,dmarcrpt DKIMResult string,dmarcrpt SPFResult string,dmarcrpt SPFDomainScope string,dmarcrpt DMARCResult string,dmarcrpt PolicyOverride string,dmarcrpt Alignment string,dmarcrpt Disposition string,tlsrpt PolicyType string,tlsrpt ResultType string' Admin) >webadmin/api.json
(cd webaccount && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Account) >webaccount/api.json
(cd webmail && CGO_ENABLED=0 go run ../vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/*.go -adjust-function-names none Webmail) >webmail/api.json

101
gendoc.sh

@ -1,34 +1,36 @@
#!/usr/bin/env sh #!/bin/sh
# ./doc.go
( (
cat <<EOF cat <<EOF
/* /*
Command mox is a modern, secure, full-featured, open source mail server for Command mox is a modern full-featured open source secure mail server for
low-maintenance self-hosted email. low-maintenance self-hosted email.
Mox is started with the "serve" subcommand, but mox also has many other - Quick and easy to set up with quickstart and automatic TLS with ACME and
subcommands. Let's Encrypt.
- IMAP4 with extensions for accessing email.
- SMTP with SPF, DKIM, DMARC, DNSBL, MTA-STS, TLSRPT for exchanging email.
- Reputation-based and content-based spam filtering.
- Internationalized email.
- Admin web interface.
Many of those commands talk to a running mox instance, through the ctl file in # Commands
the data directory. Specify the configuration file (that holds the path to the
data directory) through the -config flag or MOXCONF environment variable.
Commands that don't talk to a running mox instance are often for
testing/debugging email functionality. For example for parsing an email message,
or looking up SPF/DKIM/DMARC records.
Below is the usage information as printed by the command when started without
any parameters. Followed by the help and usage information for each command.
# Usage
EOF EOF
./mox 2>&1 | sed -e 's/^usage: */ /' -e 's/^ */ /' ./mox 2>&1 | sed 's/^\( *\|usage: \)/\t/'
echo
./mox helpall 2>&1 cat <<EOF
Many commands talk to a running mox instance, through the ctl file in the data
directory. Specify the configuration file (that holds the path to the data
directory) through the -config flag or MOXCONF environment variable.
EOF
# setting XDG_CONFIG_HOME ensures "mox localserve" has reasonable default
# values in its help output.
XDG_CONFIG_HOME='$userconfigdir' ./mox helpall 2>&1
cat <<EOF cat <<EOF
*/ */
@ -39,70 +41,47 @@ EOF
)>doc.go )>doc.go
gofmt -w doc.go gofmt -w doc.go
# ./config/doc.go
( (
cat <<EOF cat <<EOF
/* /*
Package config holds the configuration file definitions. Package config holds the configuration file definitions for mox.conf (Static)
and domains.conf (Dynamic).
Mox uses two config files: These config files are in "sconf" format. Summarized: Indent with tabs, "#" as
first non-whitespace character makes the line a comment (you cannot have a line
1. mox.conf, also called the static configuration file. with both a value and a comment), strings are not quoted/escaped and can never
2. domains.conf, also called the dynamic configuration file. span multiple lines. See https://pkg.go.dev/github.com/mjl-/sconf for details.
The static configuration file is never reloaded during the lifetime of a
running mox instance. After changes to mox.conf, mox must be restarted for the
changes to take effect.
The dynamic configuration file is reloaded automatically when it changes.
If the file contains an error after the change, the reload is aborted and the
previous version remains active.
Below are "empty" config files, generated from the config file definitions in
the source code, along with comments explaining the fields. Fields named "x" are
placeholders for user-chosen map keys.
# sconf
The config files are in "sconf" format. Properties of sconf files:
- Indentation with tabs only.
- "#" as first non-whitespace character makes the line a comment. Lines with a
value cannot also have a comment.
- Values don't have syntax indicating their type. For example, strings are
not quoted/escaped and can never span multiple lines.
- Fields that are optional can be left out completely. But the value of an
optional field may itself have required fields.
See https://pkg.go.dev/github.com/mjl-/sconf for details.
Annotated empty/default configuration files you could use as a starting point
for your mox.conf and domains.conf, as generated by "mox config
describe-static" and "mox config describe-domains":
# mox.conf # mox.conf
EOF EOF
./mox config describe-static | sed 's/^/ /' ./mox config describe-static | sed 's/^/\t/'
cat <<EOF cat <<EOF
# domains.conf # domains.conf
EOF EOF
./mox config describe-domains | sed 's/^/ /' ./mox config describe-domains | sed 's/^/\t/'
cat <<EOF cat <<EOF
# Examples # Examples
Mox includes configuration files to illustrate common setups. You can see these Mox includes configuration files to illustrate common setups. You can see these
examples with "mox config example", and print a specific example with "mox examples with "mox example", and print a specific example with "mox example
config example <name>". Below are all examples included in mox. <name>". Below are all examples included in mox.
EOF EOF
for ex in $(./mox config example); do for ex in $(./mox example); do
echo '# Example '$ex echo '# Example '$ex
echo echo
./mox config example $ex | sed 's/^/ /' ./mox example $ex | sed 's/^/\t/'
echo echo
done done
@ -114,7 +93,3 @@ package config
EOF EOF
)>config/doc.go )>config/doc.go
gofmt -w config/doc.go gofmt -w config/doc.go
# ./webapi/doc.go
./webapi/gendoc.sh >webapi/doc.go
gofmt -w webapi/doc.go
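The sconf format summarized in the generated config/doc.go text above can be exercised directly with sconf.Parse, as the example code elsewhere in this diff does. A rough sketch with a made-up snippet and struct:

	package main

	import (
		"fmt"
		"log"
		"strings"

		"github.com/mjl-/sconf"
	)

	func main() {
		// Made-up snippet; the format uses "#" comment lines, unquoted string
		// values, and tab indentation for nested values.
		const snippet = "# example snippet\nHostname: mail.example.org\nDataDir: /var/lib/mox\n"

		var conf struct {
			Hostname string
			DataDir  string
		}
		if err := sconf.Parse(strings.NewReader(snippet), &conf); err != nil {
			log.Fatalf("parsing snippet: %v", err)
		}
		fmt.Println(conf.Hostname, conf.DataDir)
	}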


@ -1,7 +0,0 @@
#!/bin/sh
rm -r licenses
set -e
for p in $(cd vendor && find . -iname '*license*' -or -iname '*licence*' -or -iname '*notice*' -or -iname '*patent*'); do
(set +e; mkdir -p $(dirname licenses/$p))
cp vendor/$p licenses/$p
done


@ -30,7 +30,7 @@ import (
func cmdGentestdata(c *cmd) { func cmdGentestdata(c *cmd) {
c.unlisted = true c.unlisted = true
c.params = "destdir" c.params = "dest-dir"
c.help = `Generate a data directory populated, for testing upgrades.` c.help = `Generate a data directory populated, for testing upgrades.`
args := c.Parse() args := c.Parse()
if len(args) != 1 { if len(args) != 1 {
@ -55,6 +55,8 @@ func cmdGentestdata(c *cmd) {
} }
ctxbg := context.Background() ctxbg := context.Background()
mox.Shutdown = ctxbg
mox.Context = ctxbg
mox.Conf.Log[""] = mlog.LevelInfo mox.Conf.Log[""] = mlog.LevelInfo
mlog.SetConfig(mox.Conf.Log) mlog.SetConfig(mox.Conf.Log)
@ -84,8 +86,8 @@ Accounts:
IgnoreWords: 0.1 IgnoreWords: 0.1
` `
mox.ConfigStaticPath = filepath.FromSlash("/tmp/mox-bogus/mox.conf") mox.ConfigStaticPath = "/tmp/mox-bogus/mox.conf"
mox.ConfigDynamicPath = filepath.FromSlash("/tmp/mox-bogus/domains.conf") mox.ConfigDynamicPath = "/tmp/mox-bogus/domains.conf"
mox.Conf.DynamicLastCheck = time.Now() // Should prevent warning. mox.Conf.DynamicLastCheck = time.Now() // Should prevent warning.
mox.Conf.Static = config.Static{ mox.Conf.Static = config.Static{
DataDir: destDataDir, DataDir: destDataDir,
@ -187,19 +189,13 @@ Accounts:
err = os.WriteFile(filepath.Join(destDataDir, "moxversion"), []byte(moxvar.Version), 0660) err = os.WriteFile(filepath.Join(destDataDir, "moxversion"), []byte(moxvar.Version), 0660)
xcheckf(err, "writing moxversion") xcheckf(err, "writing moxversion")
// Populate auth.db
err = store.Init(ctxbg)
xcheckf(err, "store init")
err = store.TLSPublicKeyAdd(ctxbg, &store.TLSPublicKey{Name: "testkey", Fingerprint: "...", Type: "ecdsa-p256", CertDER: []byte("..."), Account: "test0", LoginAddress: "test0@mox.example"})
xcheckf(err, "adding tlspubkey")
// Populate dmarc.db. // Populate dmarc.db.
err = dmarcdb.Init() err = dmarcdb.Init()
xcheckf(err, "dmarcdb init") xcheckf(err, "dmarcdb init")
report, err := dmarcrpt.ParseReport(strings.NewReader(dmarcReport)) report, err := dmarcrpt.ParseReport(strings.NewReader(dmarcReport))
xcheckf(err, "parsing dmarc aggregate report") xcheckf(err, "parsing dmarc report")
err = dmarcdb.AddReport(ctxbg, report, dns.Domain{ASCII: "mox.example"}) err = dmarcdb.AddReport(ctxbg, report, dns.Domain{ASCII: "mox.example"})
xcheckf(err, "adding dmarc aggregate report") xcheckf(err, "adding dmarc report")
// Populate mtasts.db. // Populate mtasts.db.
err = mtastsdb.Init(false) err = mtastsdb.Init(false)
@ -207,23 +203,22 @@ Accounts:
mtastsPolicy := mtasts.Policy{ mtastsPolicy := mtasts.Policy{
Version: "STSv1", Version: "STSv1",
Mode: mtasts.ModeTesting, Mode: mtasts.ModeTesting,
MX: []mtasts.MX{ MX: []mtasts.STSMX{
{Domain: dns.Domain{ASCII: "mx1.example.com"}}, {Domain: dns.Domain{ASCII: "mx1.example.com"}},
{Domain: dns.Domain{ASCII: "mx2.example.com"}}, {Domain: dns.Domain{ASCII: "mx2.example.com"}},
{Domain: dns.Domain{ASCII: "backup-example.com"}, Wildcard: true}, {Domain: dns.Domain{ASCII: "backup-example.com"}, Wildcard: true},
}, },
MaxAgeSeconds: 1296000, MaxAgeSeconds: 1296000,
} }
err = mtastsdb.Upsert(ctxbg, dns.Domain{ASCII: "mox.example"}, "123", &mtastsPolicy, mtastsPolicy.String()) err = mtastsdb.Upsert(ctxbg, dns.Domain{ASCII: "mox.example"}, "123", &mtastsPolicy)
xcheckf(err, "adding mtastsdb report") xcheckf(err, "adding mtastsdb report")
// Populate tlsrpt.db. // Populate tlsrpt.db.
err = tlsrptdb.Init() err = tlsrptdb.Init()
xcheckf(err, "tlsrptdb init") xcheckf(err, "tlsrptdb init")
tlsreportJSON, err := tlsrpt.Parse(strings.NewReader(tlsReport)) tlsr, err := tlsrpt.Parse(strings.NewReader(tlsReport))
xcheckf(err, "parsing tls report") xcheckf(err, "parsing tls report")
tlsr := tlsreportJSON.Convert() err = tlsrptdb.AddReport(ctxbg, dns.Domain{ASCII: "mox.example"}, "tlsrpt@mox.example", tlsr)
err = tlsrptdb.AddReport(ctxbg, c.log, dns.Domain{ASCII: "mox.example"}, "tlsrpt@mox.example", false, &tlsr)
xcheckf(err, "adding tls report") xcheckf(err, "adding tls report")
// Populate queue, with a message. // Populate queue, with a message.
@ -234,28 +229,23 @@ Accounts:
prefix := []byte{} prefix := []byte{}
mf := tempfile() mf := tempfile()
xcheckf(err, "temp file for queue message") xcheckf(err, "temp file for queue message")
defer store.CloseRemoveTempFile(c.log, mf, "test message") defer mf.Close()
const qmsg = "From: <test0@mox.example>\r\nTo: <other@remote.example>\r\nSubject: test\r\n\r\nthe message...\r\n" const qmsg = "From: <test0@mox.example>\r\nTo: <other@remote.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
_, err = fmt.Fprint(mf, qmsg) _, err = fmt.Fprint(mf, qmsg)
xcheckf(err, "writing message") xcheckf(err, "writing message")
qm := queue.MakeMsg(mailfrom, rcptto, false, false, int64(len(qmsg)), "<test@localhost>", prefix, nil, time.Now(), "test") err = queue.Add(ctxbg, mlog.New("gentestdata"), "test0", mailfrom, rcptto, false, false, int64(len(qmsg)), prefix, mf, nil, true)
err = queue.Add(ctxbg, c.log, "test0", mf, qm)
xcheckf(err, "enqueue message") xcheckf(err, "enqueue message")
// Create three accounts. // Create three accounts.
// First account without messages. // First account without messages.
accTest0, err := store.OpenAccount(c.log, "test0", false) accTest0, err := store.OpenAccount("test0")
xcheckf(err, "open account test0") xcheckf(err, "open account test0")
err = accTest0.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
err = accTest0.Close() err = accTest0.Close()
xcheckf(err, "close account") xcheckf(err, "close account")
// Second account with one message. // Second account with one message.
accTest1, err := store.OpenAccount(c.log, "test1", false) accTest1, err := store.OpenAccount("test1")
xcheckf(err, "open account test1") xcheckf(err, "open account test1")
err = accTest1.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
err = accTest1.DB.Write(ctxbg, func(tx *bstore.Tx) error { err = accTest1.DB.Write(ctxbg, func(tx *bstore.Tx) error {
inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get() inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get()
xcheckf(err, "looking up inbox") xcheckf(err, "looking up inbox")
@ -263,6 +253,7 @@ Accounts:
m := store.Message{ m := store.Message{
MailboxID: inbox.ID, MailboxID: inbox.ID,
MailboxOrigID: inbox.ID, MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "1.2.3.4", RemoteIP: "1.2.3.4",
RemoteIPMasked1: "1.2.3.4", RemoteIPMasked1: "1.2.3.4",
RemoteIPMasked2: "1.2.3.0", RemoteIPMasked2: "1.2.3.0",
@ -287,16 +278,12 @@ Accounts:
} }
mf := tempfile() mf := tempfile()
xcheckf(err, "creating temp file for delivery") xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf, "test message")
_, err = fmt.Fprint(mf, msg) _, err = fmt.Fprint(mf, msg)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest1.DeliverMessage(mlog.New("gentestdata"), tx, &m, mf, true, false, false, true)
err = accTest1.MessageAdd(c.log, tx, &inbox, &m, mf, store.AddOpts{}) xcheckf(err, "add message to account test1")
xcheckf(err, "deliver message") err = mf.Close()
xcheckf(err, "closing file")
err = tx.Update(&inbox)
xcheckf(err, "update inbox")
return nil return nil
}) })
xcheckf(err, "write transaction with new message") xcheckf(err, "write transaction with new message")
@ -304,10 +291,8 @@ Accounts:
xcheckf(err, "close account") xcheckf(err, "close account")
// Third account with two messages and junkfilter. // Third account with two messages and junkfilter.
accTest2, err := store.OpenAccount(c.log, "test2", false) accTest2, err := store.OpenAccount("test2")
xcheckf(err, "open account test2") xcheckf(err, "open account test2")
err = accTest2.ThreadingWait(c.log)
xcheckf(err, "wait for threading to finish")
err = accTest2.DB.Write(ctxbg, func(tx *bstore.Tx) error { err = accTest2.DB.Write(ctxbg, func(tx *bstore.Tx) error {
inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get() inbox, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Inbox"}).Get()
xcheckf(err, "looking up inbox") xcheckf(err, "looking up inbox")
@ -315,6 +300,7 @@ Accounts:
m0 := store.Message{ m0 := store.Message{
MailboxID: inbox.ID, MailboxID: inbox.ID,
MailboxOrigID: inbox.ID, MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "::1", RemoteIP: "::1",
RemoteIPMasked1: "::", RemoteIPMasked1: "::",
RemoteIPMasked2: "::", RemoteIPMasked2: "::",
@ -339,34 +325,33 @@ Accounts:
} }
mf0 := tempfile() mf0 := tempfile()
xcheckf(err, "creating temp file for delivery") xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf0, "test message")
_, err = fmt.Fprint(mf0, msg0) _, err = fmt.Fprint(mf0, msg0)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest2.MessageAdd(c.log, tx, &inbox, &m0, mf0, store.AddOpts{}) err = accTest2.DeliverMessage(mlog.New("gentestdata"), tx, &m0, mf0, true, false, false, false)
xcheckf(err, "add message to account test2") xcheckf(err, "add message to account test2")
err = tx.Update(&inbox) err = mf0.Close()
xcheckf(err, "update inbox") xcheckf(err, "closing file")
sent, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Sent"}).Get() sent, err := bstore.QueryTx[store.Mailbox](tx).FilterNonzero(store.Mailbox{Name: "Sent"}).Get()
xcheckf(err, "looking up inbox") xcheckf(err, "looking up inbox")
const prefix1 = "Extra: test\r\n" const prefix1 = "Extra: test\r\n"
const msg1 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n" const msg1 = "From: <other@remote.example>\r\nTo: <☹@xn--74h.example>\r\nSubject: test\r\n\r\nthe message...\r\n"
m1 := store.Message{ m1 := store.Message{
MailboxID: sent.ID, MailboxID: sent.ID,
MailboxOrigID: sent.ID, MailboxOrigID: sent.ID,
Flags: store.Flags{Seen: true, Junk: true}, MailboxDestinedID: sent.ID,
Size: int64(len(prefix1) + len(msg1)), Flags: store.Flags{Seen: true, Junk: true},
MsgPrefix: []byte(prefix1), Size: int64(len(prefix1) + len(msg1)),
MsgPrefix: []byte(prefix),
} }
mf1 := tempfile() mf1 := tempfile()
xcheckf(err, "creating temp file for delivery") xcheckf(err, "creating temp file for delivery")
defer store.CloseRemoveTempFile(c.log, mf1, "test message")
_, err = fmt.Fprint(mf1, msg1) _, err = fmt.Fprint(mf1, msg1)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest2.MessageAdd(c.log, tx, &sent, &m1, mf1, store.AddOpts{}) err = accTest2.DeliverMessage(mlog.New("gentestdata"), tx, &m1, mf1, true, true, false, false)
xcheckf(err, "add message to account test2") xcheckf(err, "add message to account test2")
err = tx.Update(&sent) err = mf1.Close()
xcheckf(err, "update sent") xcheckf(err, "closing file")
return nil return nil
}) })

View File

@ -1,11 +0,0 @@
#!/bin/sh
set -eu
# generate new typescript client, only install it when it is different, so we
# don't trigger frontend builds needlessly.
go run vendor/github.com/mjl-/sherpats/cmd/sherpats/main.go -bytes-to-string -slices-nullable -maps-nullable -nullable-optional -namespace api api <$1 >$2.tmp
if cmp -s $2 $2.tmp; then
rm $2.tmp
else
mv $2.tmp $2
fi

View File

@ -1,117 +0,0 @@
#!/usr/bin/env bash
mkdir website/html 2>/dev/null
rm -r website/html/* 2>/dev/null
set -euo pipefail
commithash=$(git rev-parse --short HEAD)
commitdate=$(git log -1 --date=format:"%Y-%m-%d" --format="%ad")
export commithash
export commitdate
# Link to static files and cross-references.
ln -sf ../../../mox-website-files/files website/html/files
ln -sf ../../rfc/xr website/html/xr
# All commands below are executed relative to ./website/
cd website
go run website.go -root -title 'Mox: modern, secure, all-in-one mail server' 'Mox' < index.md >html/index.html
mkdir html/features
(
cat features/index.md
echo
sed -n -e 's/^# Roadmap/## Roadmap/' -e '/# FAQ/q' -e '/# Roadmap/,/# FAQ/p' < ../README.md
echo
echo 'Also see the [Protocols](../protocols/) page for implementation status, and (non)-plans.'
) | go run website.go 'Features' >html/features/index.html
mkdir html/screenshots
go run website.go 'Screenshots' < screenshots/index.md >html/screenshots/index.html
mkdir html/install
go run website.go 'Install' < install/index.md >html/install/index.html
mkdir html/faq
sed -n '/# FAQ/,//p' < ../README.md | go run website.go 'FAQ' >html/faq/index.html
mkdir html/config
(
echo '# Config reference'
echo
sed -n '/^Package config holds /,/\*\//p' < ../config/doc.go | grep -v -E '^(Package config holds |\*/)' | sed 's/^# /## /'
) | go run website.go 'Config reference' >html/config/index.html
mkdir html/commands
(
echo '# Command reference'
echo
sed -n '/^Mox is started /,/\*\//p' < ../doc.go | grep -v '\*/' | sed 's/^# /## /'
) | go run website.go 'Command reference' >html/commands/index.html
mkdir html/protocols
go run website.go -protocols 'Protocols' <../rfc/index.txt >html/protocols/index.html
mkdir html/b
cat <<'EOF' >html/b/index.html
<!doctype html>
<html>
<head>
<meta charset="utf-8" />
<title>mox build</title>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<link rel="icon" href="noNeedlessFaviconRequestsPlease:" />
<style>
body { padding: 1em; }
* { font-size: 18px; font-family: ubuntu, lato, sans-serif; margin: 0; padding: 0; box-sizing: border-box; }
p { max-width: 50em; margin-bottom: 2ex; }
pre { font-family: 'ubuntu mono', monospace; }
pre, blockquote { padding: 1em; background-color: #eee; border-radius: .25em; display: inline-block; margin-bottom: 1em; }
h1 { margin: 1em 0 .5em 0; }
</style>
</head>
<body>
<script>
const elem = (name, ...s) => {
const e = document.createElement(name)
e.append(...s)
return e
}
const link = (url, anchor) => {
const e = document.createElement('a')
e.setAttribute('href', url)
e.setAttribute('rel', 'noopener')
e.append(anchor || url)
return e
}
let h = location.hash.substring(1)
const ok = /^[a-zA-Z0-9_\.]+$/.test(h)
if (!ok) {
h = '<tag-or-branch-or-commithash>'
}
const init = () => {
document.body.append(
elem('p', 'Compile or download any version of mox, by tag (release), branch or commit hash.'),
elem('h1', 'Compile'),
elem('p', 'Run:'),
elem('pre', 'CGO_ENABLED=0 GOBIN=$PWD go install github.com/mjl-/mox@'+h),
elem('p', 'Mox is tested with the Go toolchain versions that still have support: the most recent version, and the version before.'),
elem('h1', 'Download'),
elem('p', 'Download a binary for your platform:'),
elem('blockquote', ok ?
link('https://beta.gobuilds.org/github.com/mjl-/mox@'+h) :
'https://beta.gobuilds.org/github.com/mjl-/mox@'+h
),
elem('p', 'Because mox is written in Go, builds are reproducible, also when cross-compiling. Gobuilds.org is a service that builds Go applications on-demand with the latest Go toolchain/runtime.'),
elem('h1', 'Localserve'),
elem('p', 'Changes to mox can often be most easily tested locally with ', link('../features/#hdr-localserve', '"mox localserve"'), ', without having to update your running mail server.'),
)
}
window.addEventListener('load', init)
</script>
</body>
</html>
EOF

48
go.mod
View File

@ -1,37 +1,31 @@
module github.com/mjl-/mox module github.com/mjl-/mox
go 1.23.0 go 1.18
require ( require (
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea github.com/mjl-/bstore v0.0.1
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31 github.com/mjl-/sconf v0.0.4
github.com/mjl-/bstore v0.0.9 github.com/mjl-/sherpa v0.6.5
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978 github.com/mjl-/sherpadoc v0.0.10
github.com/mjl-/sconf v0.0.7
github.com/mjl-/sherpa v0.6.7
github.com/mjl-/sherpadoc v0.0.16
github.com/mjl-/sherpaprom v0.0.2 github.com/mjl-/sherpaprom v0.0.2
github.com/mjl-/sherpats v0.0.6 github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_golang v1.18.0 go.etcd.io/bbolt v1.3.7
github.com/russross/blackfriday/v2 v2.1.0 golang.org/x/crypto v0.8.0
go.etcd.io/bbolt v1.3.11 golang.org/x/net v0.9.0
golang.org/x/crypto v0.37.0 golang.org/x/text v0.9.0
golang.org/x/net v0.39.0
golang.org/x/sys v0.32.0
golang.org/x/text v0.24.0
rsc.io/qr v0.2.0
) )
require ( require (
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/golang/protobuf v1.5.2 // indirect
github.com/mjl-/xfmt v0.0.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/prometheus/client_model v0.5.0 // indirect github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce // indirect
github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/common v0.37.0 // indirect
golang.org/x/mod v0.24.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
golang.org/x/sync v0.13.0 // indirect golang.org/x/mod v0.8.0 // indirect
golang.org/x/tools v0.32.0 // indirect golang.org/x/sys v0.7.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect golang.org/x/tools v0.6.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
) )

505
go.sum
View File

@ -1,117 +1,510 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/mjl-/bstore v0.0.1 h1:OzQfYgpMCvNjNIj9FFJ3HidYzG6eSlLSYzCTzw9sptY=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mjl-/bstore v0.0.1/go.mod h1:/cD25FNBaDfvL/plFRxI3Ba3E+wcB0XVOS8nJDqndg0=
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea h1:8dftsVL1tHhRksXzFZRhSJ7gSlcy/t87Nvucs3JnTGE= github.com/mjl-/sconf v0.0.4 h1:uyfn4vv5qOULSgiwQsPbbgkiONKnMFMsSOhsHfAiYwI=
github.com/mjl-/adns v0.0.0-20250321173553-ab04b05bdfea/go.mod h1:rWZMqGA2HoBm5b5q/A5J8u1sSVuEYh6zBz9tMoVs+RU= github.com/mjl-/sconf v0.0.4/go.mod h1:ezf7YOn7gtClo8y71SqgZKaEkyMQ5Te7vkv4PmTTfwM=
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31 h1:6MFGOLPGf6VzHWkKv8waSzJMMS98EFY2LVKPRHffCyo= github.com/mjl-/sherpa v0.6.5 h1:d90uG/j8fw+2M+ohCTAcVwTSUURGm8ktYDScJO1nKog=
github.com/mjl-/autocert v0.0.0-20250321204043-abab2b936e31/go.mod h1:taMFU86abMxKLPV4Bynhv8enbYmS67b8LG80qZv2Qus= github.com/mjl-/sherpa v0.6.5/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
github.com/mjl-/bstore v0.0.9 h1:j8HVXL10Arbk4ujeRGwns8gipH1N1TZn853inQ42FgY=
github.com/mjl-/bstore v0.0.9/go.mod h1:xzIpSfcFosgPJ6h+vsdIt0pzCq4i8hhMuHPQJ0aHQhM=
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978 h1:Eg5DfI3/00URzGErujKus6a3O0kyXzF8vjoDZzH/gig=
github.com/mjl-/flate v0.0.0-20250221133712-6372d09eb978/go.mod h1:QBkFtjai3AiQQuUu7pVh6PA06Vd3oa68E+vddf/UBOs=
github.com/mjl-/sconf v0.0.7 h1:bdBcSFZCDFMm/UdBsgNCsjkYmKrSgYwp7rAOoufwHe4=
github.com/mjl-/sconf v0.0.7/go.mod h1:uF8OdWtLT8La3i4ln176i1pB0ps9pXGCaABEU55ZkE0=
github.com/mjl-/sherpa v0.6.7 h1:C5F8XQdV5nCuS4fvB+ye/ziUQrajEhOoj/t2w5T14BY=
github.com/mjl-/sherpa v0.6.7/go.mod h1:dSpAOdgpwdqQZ72O4n3EHo/tR68eKyan8tYYraUMPNc=
github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok= github.com/mjl-/sherpadoc v0.0.0-20190505200843-c0a7f43f5f1d/go.mod h1:5khTKxoKKNXcB8bkVUO6GlzC7PFtMmkHq578lPbmnok=
github.com/mjl-/sherpadoc v0.0.16 h1:BdlFNXfnTaA7qO54kof4xpNFJxYBTY0cIObRk7QAP6M= github.com/mjl-/sherpadoc v0.0.10 h1:tvRVd37IIGg70ZmNkNKNnjDSPtKI5/DdEIukMkWtZYE=
github.com/mjl-/sherpadoc v0.0.16/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I= github.com/mjl-/sherpadoc v0.0.10/go.mod h1:vh5zcsk3j/Tvm725EY+unTZb3EZcZcpiEQzrODSa6+I=
github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y= github.com/mjl-/sherpaprom v0.0.2 h1:1dlbkScsNafM5jURI44uiWrZMSwfZtcOFEEq7vx2C1Y=
github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4= github.com/mjl-/sherpaprom v0.0.2/go.mod h1:cl5nMNOvqhzMiQJ2FzccQ9ReivjHXe53JhOVkPfSvw4=
github.com/mjl-/sherpats v0.0.6 h1:2lSoJbb+jkjLOdlvoMxItq0QQrrnkH+rnm3PMRfpbmA= github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce h1:oyFmIHo3GLWZzb0odAzN9QUy0MTW6P8JaNRnNVGCBCk=
github.com/mjl-/sherpats v0.0.6/go.mod h1:MoNZJtLmu8oCZ4Ocv5vZksENN4pp6/SJMlg9uTII4KA= github.com/mjl-/xfmt v0.0.0-20190521151243-39d9c00752ce/go.mod h1:DIEOLmETMQHHr4OgwPG7iC37rDiN9MaZIZxNm5hBtL8=
github.com/mjl-/xfmt v0.0.2 h1:6dLgd6U3bmDJKtTxsaSYYyMaORoO4hKBAJo4XKkPRko= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/mjl-/xfmt v0.0.2/go.mod h1:DIEOLmETMQHHr4OgwPG7iC37rDiN9MaZIZxNm5hBtL8= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
rsc.io/qr v0.2.0/go.mod h1:IF+uZjkb9fqyeF/4tlBoynqmQxUoPfWEKh921coOuXs=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

360
http/account.go Normal file
View File

@ -0,0 +1,360 @@
package http
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"context"
"encoding/base64"
"encoding/json"
"errors"
"io"
"net"
"net/http"
"os"
"strings"
"time"
_ "embed"
"github.com/mjl-/sherpa"
"github.com/mjl-/sherpaprom"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/metrics"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxvar"
"github.com/mjl-/mox/store"
)
//go:embed accountapi.json
var accountapiJSON []byte
//go:embed account.html
var accountHTML []byte
var accountDoc = mustParseAPI(accountapiJSON)
var accountSherpaHandler http.Handler
func init() {
collector, err := sherpaprom.NewCollector("moxaccount", nil)
if err != nil {
xlog.Fatalx("creating sherpa prometheus collector", err)
}
accountSherpaHandler, err = sherpa.NewHandler("/api/", moxvar.Version, Account{}, &accountDoc, &sherpa.HandlerOpts{Collector: collector, AdjustFunctionNames: "none"})
if err != nil {
xlog.Fatalx("sherpa handler", err)
}
}
// Account exports web API functions for the account web interface. All its
// methods are exported under api/. Function calls require valid HTTP
// Authentication credentials of a user.
type Account struct{}
// check http basic auth, returns account name if valid, and writes http response
// and returns empty string otherwise.
func checkAccountAuth(ctx context.Context, log *mlog.Log, w http.ResponseWriter, r *http.Request) string {
authResult := "error"
start := time.Now()
var addr *net.TCPAddr
defer func() {
metrics.AuthenticationInc("httpaccount", "httpbasic", authResult)
if authResult == "ok" && addr != nil {
mox.LimiterFailedAuth.Reset(addr.IP, start)
}
}()
var err error
var remoteIP net.IP
addr, err = net.ResolveTCPAddr("tcp", r.RemoteAddr)
if err != nil {
log.Errorx("parsing remote address", err, mlog.Field("addr", r.RemoteAddr))
} else if addr != nil {
remoteIP = addr.IP
}
if remoteIP != nil && !mox.LimiterFailedAuth.Add(remoteIP, start, 1) {
metrics.AuthenticationRatelimitedInc("httpaccount")
http.Error(w, "429 - too many auth attempts", http.StatusTooManyRequests)
return ""
}
// store.OpenEmailAuth has an auth cache, so we don't bcrypt for every auth attempt.
if auth := r.Header.Get("Authorization"); auth == "" || !strings.HasPrefix(auth, "Basic ") {
} else if authBuf, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")); err != nil {
log.Debugx("parsing base64", err)
} else if t := strings.SplitN(string(authBuf), ":", 2); len(t) != 2 {
log.Debug("bad user:pass form")
} else if acc, err := store.OpenEmailAuth(t[0], t[1]); err != nil {
if errors.Is(err, store.ErrUnknownCredentials) {
authResult = "badcreds"
log.Info("failed authentication attempt", mlog.Field("username", t[0]), mlog.Field("remote", remoteIP))
}
log.Errorx("open account", err)
} else {
authResult = "ok"
accName := acc.Name
err := acc.Close()
log.Check(err, "closing account")
return accName
}
// note: browsers don't display the realm to prevent users getting confused by malicious realm messages.
w.Header().Set("WWW-Authenticate", `Basic realm="mox account - login with email address and password"`)
http.Error(w, "http 401 - unauthorized - mox account - login with email address and password", http.StatusUnauthorized)
return ""
}
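// Illustrative sketch, not code from the repository: the Authorization header
// verified above is plain HTTP basic auth, i.e. "Basic " followed by
// base64("address:password"). A client could construct it like this; the URL,
// address and password are made up:
//
//	req, _ := http.NewRequest("GET", "https://mail.example/account/", nil)
//	creds := base64.StdEncoding.EncodeToString([]byte("mjl@example.org:secret1234"))
//	req.Header.Set("Authorization", "Basic "+creds)
//
// net/http can also set this header directly with req.SetBasicAuth("mjl@example.org", "secret1234").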
func accountHandle(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), mlog.CidKey, mox.Cid())
log := xlog.WithContext(ctx).Fields(mlog.Field("userauth", ""))
// Without authentication. The token is unguessable.
if r.URL.Path == "/importprogress" {
if r.Method != "GET" {
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
return
}
q := r.URL.Query()
token := q.Get("token")
if token == "" {
http.Error(w, "400 - bad request - missing token", http.StatusBadRequest)
return
}
flusher, ok := w.(http.Flusher)
if !ok {
log.Error("internal error: ResponseWriter not a http.Flusher")
http.Error(w, "500 - internal error - cannot sync to http connection", 500)
return
}
l := importListener{token, make(chan importEvent, 100), make(chan bool, 1)}
importers.Register <- &l
ok = <-l.Register
if !ok {
http.Error(w, "400 - bad request - unknown token, import may have finished more than a minute ago", http.StatusBadRequest)
return
}
defer func() {
importers.Unregister <- &l
}()
h := w.Header()
h.Set("Content-Type", "text/event-stream")
h.Set("Cache-Control", "no-cache")
_, err := w.Write([]byte(": keepalive\n\n"))
if err != nil {
return
}
flusher.Flush()
cctx := r.Context()
for {
select {
case e := <-l.Events:
_, err := w.Write(e.SSEMsg)
flusher.Flush()
if err != nil {
return
}
case <-cctx.Done():
return
}
}
}
accName := checkAccountAuth(ctx, log, w, r)
if accName == "" {
// Response already sent.
return
}
switch r.URL.Path {
case "/":
if r.Method != "GET" {
http.Error(w, "405 - method not allowed - post required", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.Header().Set("Cache-Control", "no-cache; max-age=0")
// We typically return the embedded account.html, but during development it's handy
// to load from disk.
f, err := os.Open("http/account.html")
if err == nil {
defer f.Close()
_, _ = io.Copy(w, f)
} else {
_, _ = w.Write(accountHTML)
}
case "/mail-export-maildir.tgz", "/mail-export-maildir.zip", "/mail-export-mbox.tgz", "/mail-export-mbox.zip":
maildir := strings.Contains(r.URL.Path, "maildir")
tgz := strings.Contains(r.URL.Path, ".tgz")
acc, err := store.OpenAccount(accName)
if err != nil {
log.Errorx("open account for export", err)
http.Error(w, "500 - internal server error", http.StatusInternalServerError)
return
}
defer func() {
err := acc.Close()
log.Check(err, "closing account")
}()
var archiver store.Archiver
if tgz {
// Don't tempt browsers to "helpfully" decompress.
w.Header().Set("Content-Type", "application/octet-stream")
gzw := gzip.NewWriter(w)
defer func() {
_ = gzw.Close()
}()
archiver = store.TarArchiver{Writer: tar.NewWriter(gzw)}
} else {
w.Header().Set("Content-Type", "application/zip")
archiver = store.ZipArchiver{Writer: zip.NewWriter(w)}
}
defer func() {
err := archiver.Close()
log.Check(err, "exporting mail close")
}()
if err := store.ExportMessages(r.Context(), log, acc.DB, acc.Dir, archiver, maildir, ""); err != nil {
log.Errorx("exporting mail", err)
}
case "/import":
if r.Method != "POST" {
http.Error(w, "405 - method not allowed - post required", http.StatusMethodNotAllowed)
return
}
f, _, err := r.FormFile("file")
if err != nil {
if errors.Is(err, http.ErrMissingFile) {
http.Error(w, "400 - bad request - missing file", http.StatusBadRequest)
} else {
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
}
return
}
defer func() {
err := f.Close()
log.Check(err, "closing form file")
}()
skipMailboxPrefix := r.FormValue("skipMailboxPrefix")
tmpf, err := os.CreateTemp("", "mox-import")
if err != nil {
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
return
}
defer func() {
if tmpf != nil {
err := tmpf.Close()
log.Check(err, "closing uploaded file")
}
}()
if err := os.Remove(tmpf.Name()); err != nil {
log.Errorx("removing temporary file", err)
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
return
}
if _, err := io.Copy(tmpf, f); err != nil {
log.Errorx("copying import to temporary file", err)
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
return
}
token, err := importStart(log, accName, tmpf, skipMailboxPrefix)
if err != nil {
log.Errorx("starting import", err)
http.Error(w, "500 - internal server error - "+err.Error(), http.StatusInternalServerError)
return
}
tmpf = nil // importStart is now responsible for closing.
w.Header().Set("Content-Type", "application/json")
_ = json.NewEncoder(w).Encode(map[string]string{"ImportToken": token})
default:
if strings.HasPrefix(r.URL.Path, "/api/") {
accountSherpaHandler.ServeHTTP(w, r.WithContext(context.WithValue(ctx, authCtxKey, accName)))
return
}
http.NotFound(w, r)
}
}
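// Illustrative sketch, not code from the repository: the importprogress
// endpoint above streams server-sent events ("event:"/"data:" lines) that the
// EventSource in account.html consumes. Assuming a valid import token and a
// made-up base URL, a minimal Go consumer could look roughly like this:
//
//	resp, err := http.Get("https://mail.example/account/importprogress?token=" + url.QueryEscape(token))
//	if err != nil {
//		// handle error
//	}
//	defer resp.Body.Close()
//	sc := bufio.NewScanner(resp.Body)
//	var event string
//	for sc.Scan() {
//		line := sc.Text()
//		switch {
//		case strings.HasPrefix(line, "event:"):
//			event = strings.TrimSpace(strings.TrimPrefix(line, "event:"))
//		case strings.HasPrefix(line, "data:"):
//			fmt.Printf("%s: %s\n", event, strings.TrimSpace(strings.TrimPrefix(line, "data:")))
//		}
//	}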
type ctxKey string
var authCtxKey ctxKey = "account"
// SetPassword saves a new password for the account, invalidating the previous password.
// Sessions are not interrupted, and will keep working. New login attempts must use the new password.
// Password must be at least 8 characters.
func (Account) SetPassword(ctx context.Context, password string) {
if len(password) < 8 {
panic(&sherpa.Error{Code: "user:error", Message: "password must be at least 8 characters"})
}
accountName := ctx.Value(authCtxKey).(string)
acc, err := store.OpenAccount(accountName)
xcheckf(ctx, err, "open account")
defer func() {
err := acc.Close()
xlog.Check(err, "closing account")
}()
err = acc.SetPassword(password)
xcheckf(ctx, err, "setting password")
}
// Destinations returns the default domain, and the destinations (keys are email
// addresses, or localparts to the default domain).
// todo: replace with a function that returns the whole account, when sherpadoc understands unnamed struct fields.
func (Account) Destinations(ctx context.Context) (dns.Domain, map[string]config.Destination) {
accountName := ctx.Value(authCtxKey).(string)
accConf, ok := mox.Conf.Account(accountName)
if !ok {
xcheckf(ctx, errors.New("not found"), "looking up account")
}
return accConf.DNSDomain, accConf.Destinations
}
// DestinationSave updates a destination.
// OldDest is compared against the current destination. If it does not match, an
// error is returned. Otherwise newDest is saved and the configuration reloaded.
func (Account) DestinationSave(ctx context.Context, destName string, oldDest, newDest config.Destination) {
accountName := ctx.Value(authCtxKey).(string)
accConf, ok := mox.Conf.Account(accountName)
if !ok {
xcheckf(ctx, errors.New("not found"), "looking up account")
}
curDest, ok := accConf.Destinations[destName]
if !ok {
xcheckf(ctx, errors.New("not found"), "looking up destination")
}
if !curDest.Equal(oldDest) {
xcheckf(ctx, errors.New("modified"), "checking stored destination")
}
// Keep fields we manage.
newDest.DMARCReports = curDest.DMARCReports
newDest.TLSReports = curDest.TLSReports
err := mox.DestinationSave(ctx, accountName, destName, newDest)
xcheckf(ctx, err, "saving destination")
}
// ImportAbort aborts an import that is in progress. If the import exists and isn't
// finished, no changes will have been made by the import.
func (Account) ImportAbort(ctx context.Context, importToken string) error {
req := importAbortRequest{importToken, make(chan error)}
importers.Abort <- req
return <-req.Response
}
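The functions above are exposed as a sherpa web API under api/. A rough sketch of calling SetPassword from Go over HTTP, assuming the usual sherpa convention of POSTing a JSON object with a "params" array and the HTTP basic auth checked by checkAccountAuth; the base URL and credentials are made up:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical base URL where accountHandle is served; adjust for your setup.
	const baseURL = "https://mail.example/account/api/"

	// Sherpa call: function name in the path, parameters as a JSON "params" array.
	body, _ := json.Marshal(map[string]any{"params": []any{"new-password-1234"}})
	req, err := http.NewRequest("POST", baseURL+"SetPassword", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("mjl@example.org", "old-password") // Made-up credentials.
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the response has a "result" field, on failure an "error" object.
	var result struct {
		Error *struct {
			Code    string `json:"code"`
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	if result.Error != nil {
		fmt.Println("error:", result.Error.Code, result.Error.Message)
		return
	}
	fmt.Println("password changed")
}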

673
http/account.html Normal file
View File

@ -0,0 +1,673 @@
<!doctype html>
<html>
<head>
<title>Mox Account</title>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style>
body, html { padding: 1em; font-size: 16px; }
* { font-size: inherit; font-family: ubuntu, lato, sans-serif; margin: 0; padding: 0; box-sizing: border-box; }
h1, h2, h3, h4 { margin-bottom: 1ex; }
h1 { font-size: 1.2rem; }
h2 { font-size: 1.1rem; }
h3, h4 { font-size: 1rem; }
ul { padding-left: 1rem; }
.literal { background-color: #fdfdfd; padding: .5em 1em; border: 1px solid #eee; border-radius: 4px; white-space: pre-wrap; font-family: monospace; font-size: 15px; tab-size: 4; }
table td, table th { padding: .2em .5em; }
table > tbody > tr:nth-child(odd) { background-color: #f8f8f8; }
.text { max-width: 50em; }
p { margin-bottom: 1em; max-width: 50em; }
[title] { text-decoration: underline; text-decoration-style: dotted; }
fieldset { border: 0; }
#page { opacity: 1; animation: fadein 0.15s ease-in; }
#page.loading { opacity: 0.1; animation: fadeout 1s ease-out; }
@keyframes fadein { 0% { opacity: 0 } 100% { opacity: 1 } }
@keyframes fadeout { 0% { opacity: 1 } 100% { opacity: 0.1 } }
</style>
<script src="api/sherpa.js"></script>
<script>api._sherpa.baseurl = 'api/'</script>
</head>
<body>
<div id="page">Loading...</div>
<script>
const [dom, style, attr, prop] = (function() {
function _domKids(e, ...kl) {
kl.forEach(k => {
if (typeof k === 'string' || k instanceof String) {
e.appendChild(document.createTextNode(k))
} else if (k instanceof Node) {
e.appendChild(k)
} else if (Array.isArray(k)) {
_domKids(e, ...k)
} else if (typeof k === 'function') {
if (!k.name) {
throw new Error('function without name', k)
}
e.addEventListener(k.name, k)
} else if (typeof k === 'object' && k !== null) {
if (k.root) {
e.appendChild(k.root)
return
}
for (const key in k) {
const value = k[key]
if (key === '_prop') {
for (const prop in value) {
e[prop] = value[prop]
}
} else if (key === '_attr') {
for (const prop in value) {
e.setAttribute(prop, value[prop])
}
} else if (key === '_listen') {
e.addEventListener(...value)
} else {
e.style[key] = value
}
}
} else {
console.log('bad kid', k)
throw new Error('bad kid')
}
})
}
const _dom = (kind, ...kl) => {
const t = kind.split('.')
const e = document.createElement(t[0])
for (let i = 1; i < t.length; i++) {
e.classList.add(t[i])
}
_domKids(e, kl)
return e
}
_dom._kids = function(e, ...kl) {
while(e.firstChild) {
e.removeChild(e.firstChild)
}
_domKids(e, kl)
}
const dom = new Proxy(_dom, {
get: function(dom, prop) {
if (prop in dom) {
return dom[prop]
}
const fn = (...kl) => _dom(prop, kl)
dom[prop] = fn
return fn
},
apply: function(target, that, args) {
if (args.length === 1 && typeof args[0] === 'object' && !Array.isArray(args[0])) {
return {_attr: args[0]}
}
return _dom(...args)
},
})
const style = x => x
const attr = x => { return {_attr: x} }
const prop = x => { return {_prop: x} }
return [dom, style, attr, prop]
})()
const link = (href, anchorOpt) => dom.a(attr({href: href, rel: 'noopener noreferrer'}), anchorOpt || href)
const crumblink = (text, link) => dom.a(text, attr({href: link}))
const crumbs = (...l) => [dom.h1(l.map((e, index) => index === 0 ? e : [' / ', e])), dom.br()]
const footer = dom.div(
style({marginTop: '6ex', opacity: 0.75}),
link('https://github.com/mjl-/mox', 'mox'),
' ',
api._sherpa.version,
)
const domainName = d => {
return d.Unicode || d.ASCII
}
const domainString = d => {
if (d.Unicode) {
return d.Unicode+" ("+d.ASCII+")"
}
return d.ASCII
}
const box = (color, ...l) => [
dom.div(
style({
display: 'inline-block',
padding: '.25em .5em',
backgroundColor: color,
borderRadius: '3px',
margin: '.5ex 0',
}),
l,
),
dom.br(),
]
const green = '#1dea20'
const yellow = '#ffe400'
const red = '#ff7443'
const blue = '#8bc8ff'
const index = async () => {
const [domain, destinations] = await api.Destinations()
let passwordForm, passwordFieldset, password1, password2, passwordHint
let importForm, importFieldset, mailboxFile, mailboxFileHint, mailboxPrefix, mailboxPrefixHint, importProgress, importAbortBox, importAbort
const importTrack = async (token) => {
const importConnection = dom.div('Waiting for updates...')
importProgress.appendChild(importConnection)
let countsTbody
let counts = {} // mailbox -> elem
let problems // element
await new Promise((resolve, reject) => {
const eventSource = new window.EventSource('importprogress?token=' + encodeURIComponent(token))
eventSource.addEventListener('open', function(e) {
console.log('eventsource open', {e})
dom._kids(importConnection, dom.div('Waiting for updates, connected...'))
dom._kids(importAbortBox,
importAbort=dom.button('Abort import', attr({title: 'If the import is not yet finished, it can be aborted and no messages will have been imported.'}), async function click(e) {
try {
await api.ImportAbort(token)
} catch (err) {
console.log({err})
window.alert('Error: ' + err.message)
}
// On success, the event source will get an aborted notification and shutdown the connection.
})
)
})
eventSource.addEventListener('error', function(e) {
console.log('eventsource error', {e})
dom._kids(importConnection, box(red, 'Connection error'))
reject({message: 'Connection error'})
})
eventSource.addEventListener('count', (e) => {
const data = JSON.parse(e.data) // {Mailbox: ..., Count: ...}
console.log('import count event', {e, data})
if (!countsTbody) {
importProgress.appendChild(
dom.div(
dom.br(),
dom.h3('Importing mailboxes and messages...'),
dom.table(
dom.thead(
dom.tr(dom.th('Mailbox'), dom.th('Messages')),
),
countsTbody=dom.tbody(),
),
)
)
}
let elem = counts[data.Mailbox]
if (!elem) {
countsTbody.appendChild(
dom.tr(
dom.td(data.Mailbox),
elem=dom.td(style({textAlign: 'right'}), ''+data.Count),
),
)
counts[data.Mailbox] = elem
}
dom._kids(elem, ''+data.Count)
})
eventSource.addEventListener('problem', (e) => {
const data = JSON.parse(e.data) // {Message: ...}
console.log('import problem event', {e, data})
if (!problems) {
importProgress.appendChild(
dom.div(
dom.br(),
dom.h3('Problems during import'),
problems=dom.div(),
),
)
}
problems.appendChild(dom.div(box(yellow, data.Message)))
})
eventSource.addEventListener('done', (e) => {
console.log('import done event', {e})
importProgress.appendChild(dom.div(dom.br(), box(blue, 'Import finished')))
eventSource.close()
dom._kids(importConnection)
dom._kids(importAbortBox)
window.sessionStorage.removeItem('ImportToken')
resolve()
})
eventSource.addEventListener('aborted', function(e) {
console.log('import aborted event', {e})
importProgress.appendChild(dom.div(dom.br(), box(red, 'Import aborted, no message imported')))
eventSource.close()
dom._kids(importConnection)
dom._kids(importAbortBox)
window.sessionStorage.removeItem('ImportToken')
reject({message: 'Import aborted'})
})
})
}
const page = document.getElementById('page')
dom._kids(page,
crumbs('Mox Account'),
dom.p('NOTE: Not all account settings can be configured through these pages yet. See the configuration file for more options.'),
dom.div(
'Default domain: ',
domain.ASCII ? domainString(domain) : '(none)',
),
dom.br(),
dom.h2('Addresses'),
dom.ul(
Object.entries(destinations).sort().map(t =>
dom.li(
dom.a(t[0], attr({href: '#destinations/'+t[0]})),
t[0].startsWith('@') ? ' (catchall)' : [],
),
),
),
dom.br(),
dom.h2('Change password'),
passwordForm=dom.form(
passwordFieldset=dom.fieldset(
dom.label(
style({display: 'inline-block'}),
'New password',
dom.br(),
password1=dom.input(attr({type: 'password', required: ''}), function focus() {
passwordHint.style.display = ''
}),
),
' ',
dom.label(
style({display: 'inline-block'}),
'New password repeat',
dom.br(),
password2=dom.input(attr({type: 'password', required: ''})),
),
' ',
dom.button('Change password'),
),
passwordHint=dom.div(
style({display: 'none', marginTop: '.5ex'}),
dom.button('Generate random password', attr({type: 'button'}), function click(e) {
e.preventDefault()
let b = new Uint8Array(1)
let s = ''
const chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-_;:,<.>/'
while (s.length < 12) {
self.crypto.getRandomValues(b)
if (Math.ceil(b[0]/chars.length)*chars.length > 255) {
continue // Prevent bias.
}
s += chars[b[0]%chars.length]
}
password1.type = 'text'
password2.type = 'text'
password1.value = s
password2.value = s
}),
dom('div.text',
box(yellow, 'Important: Bots will try to bruteforce your password. Connections with failed authentication attempts will be rate limited but attackers WILL find weak passwords. If your account is compromised, spammers are likely to abuse your system, spamming your address and the wider internet in your name. So please pick a random, unguessable password, preferably at least 12 characters.'),
),
),
async function submit(e) {
e.stopPropagation()
e.preventDefault()
if (!password1.value || password1.value !== password2.value) {
window.alert('Passwords do not match.')
return
}
passwordFieldset.disabled = true
try {
await api.SetPassword(password1.value)
window.alert('Password has been changed.')
passwordForm.reset()
} catch (err) {
console.log({err})
window.alert('Error: ' + err.message)
} finally {
passwordFieldset.disabled = false
}
},
),
dom.br(),
dom.h2('Export'),
dom.p('Export all messages in all mailboxes. In maildir or mbox format, as .zip or .tgz file.'),
dom.ul(
dom.li(dom.a('mail-export-maildir.tgz', attr({href: 'mail-export-maildir.tgz'}))),
dom.li(dom.a('mail-export-maildir.zip', attr({href: 'mail-export-maildir.zip'}))),
dom.li(dom.a('mail-export-mbox.tgz', attr({href: 'mail-export-mbox.tgz'}))),
dom.li(dom.a('mail-export-mbox.zip', attr({href: 'mail-export-mbox.zip'}))),
),
dom.br(),
dom.h2('Import'),
dom.p('Import messages from a .zip or .tgz file with maildirs and/or mbox files.'),
importForm=dom.form(
async function submit(e) {
e.preventDefault()
e.stopPropagation()
const request = () => {
return new Promise((resolve, reject) => {
// Browsers can do everything. Except show a progress bar while uploading...
let progressBox, progressPercentage, progressBar
dom._kids(importProgress,
progressBox=dom.div(
dom.div('Uploading... ', progressPercentage=dom.span()),
),
)
importProgress.style.display = ''
const xhr = new window.XMLHttpRequest()
xhr.open('POST', 'import', true)
xhr.upload.addEventListener('progress', (e) => {
if (!e.lengthComputable) {
return
}
const pct = Math.floor(100*e.loaded/e.total)
dom._kids(progressPercentage, pct+'%')
})
xhr.addEventListener('load', () => {
console.log('upload done', {xhr: xhr, status: xhr.status})
if (xhr.status !== 200) {
reject({message: 'status '+xhr.status})
return
}
let resp
try {
resp = JSON.parse(xhr.responseText)
} catch (err) {
reject({message: 'parsing response json: '+err.message})
return
}
resolve(resp)
})
xhr.addEventListener('error', (e) => reject({message: 'upload error', event: e}))
xhr.addEventListener('abort', (e) => reject({message: 'upload aborted', event: e}))
xhr.send(new window.FormData(importForm))
})
}
try {
const p = request()
importFieldset.disabled = true
const result = await p
try {
window.sessionStorage.setItem('ImportToken', result.ImportToken)
} catch (err) {
console.log('storing import token in session storage', {err})
// Ignore error, could be some browser security thing like private browsing.
}
await importTrack(result.ImportToken)
} catch (err) {
console.log({err})
window.alert('Error: '+err.message)
} finally {
importFieldset.disabled = false
}
},
importFieldset=dom.fieldset(
dom.div(
style({marginBottom: '1ex'}),
dom.label(
dom.div(style({marginBottom: '.5ex'}), 'File'),
mailboxFile=dom.input(attr({type: 'file', required: '', name: 'file'}), function focus() {
mailboxFileHint.style.display = ''
}),
),
mailboxFileHint=dom.p(style({display: 'none', fontStyle: 'italic', marginTop: '.5ex'}), 'This file must either be a zip file or a gzipped tar file with mbox and/or maildir mailboxes. For maildirs, an optional file "dovecot-keywords" is read for additional keywords, like Forwarded/Junk/NotJunk. If an imported mailbox already exists by name, messages are added to the existing mailbox. If a mailbox does not yet exist it will be created.'),
),
dom.div(
style({marginBottom: '1ex'}),
dom.label(
dom.div(style({marginBottom: '.5ex'}), 'Skip mailbox prefix (optional)'),
mailboxPrefix=dom.input(attr({name: 'skipMailboxPrefix'}), function focus() {
mailboxPrefixHint.style.display = ''
}),
),
mailboxPrefixHint=dom.p(style({display: 'none', fontStyle: 'italic', marginTop: '.5ex'}), 'If set, any mbox/maildir path with this prefix will have it stripped before importing. For example, if all mailboxes are in a directory "Takeout", specify that path in the field above so mailboxes like "Takeout/Inbox.mbox" are imported into a mailbox called "Inbox" instead of "Takeout/Inbox".'),
),
dom.div(
dom.button('Upload and import'),
dom.p(style({fontStyle: 'italic', marginTop: '.5ex'}), 'The file is uploaded first, then its messages are imported. Importing is done in a transaction; you can abort the entire import before it is finished.'),
),
),
),
importAbortBox=dom.div(), // Outside fieldset because it gets disabled, above progress because may be scrolling it down quickly with problems.
importProgress=dom.div(
style({display: 'none'}),
),
footer,
)
// Try to show the progress of an earlier import session. The user may have just
// refreshed the browser.
let importToken
try {
importToken = window.sessionStorage.getItem('ImportToken')
} catch (err) {
console.log('looking up ImportToken in session storage', {err})
return
}
if (!importToken) {
return
}
importFieldset.disabled = true
dom._kids(importProgress,
dom.div(
dom.div('Reconnecting to import...'),
),
)
importProgress.style.display = ''
importTrack(importToken)
.catch((err) => {
if (window.confirm('Error reconnecting to import. Remove this import session?')) {
window.sessionStorage.removeItem('ImportToken')
dom._kids(importProgress)
importProgress.style.display = 'none'
}
})
.finally(() => {
importFieldset.disabled = false
})
}
const destination = async (name) => {
const [domain, destinations] = await api.Destinations()
let dest = destinations[name]
if (!dest) {
throw new Error('destination not found')
}
let rulesetsTbody = dom.tbody()
let rulesetsRows = []
const addRulesetsRow = (rs) => {
let headersCell = dom.td()
let headers = [] // Holds objects: {key, value, root}
const addHeader = (k, v) => {
let h = {}
h.root = dom.div(
h.key=dom.input(attr({value: k})),
' ',
h.value=dom.input(attr({value: v})),
' ',
dom.button('-', style({width: '1.5em'}), function click(e) {
h.root.remove()
headers = headers.filter(x => x !== h)
if (headers.length === 0) {
const b = dom.button('+', style({width: '1.5em'}), function click(e) {
e.target.remove()
addHeader('', '')
})
headersCell.appendChild(dom.div(style({textAlign: 'right'}), b))
}
}),
' ',
dom.button('+', style({width: '1.5em'}), function click(e) {
addHeader('', '')
}),
)
headers.push(h)
headersCell.appendChild(h.root)
}
Object.entries(rs.HeadersRegexp || {}).sort().forEach(t =>
addHeader(t[0], t[1])
)
if (Object.entries(rs.HeadersRegexp || {}).length === 0) {
const b = dom.button('+', style({width: '1.5em'}), function click(e) {
e.target.remove()
addHeader('', '')
})
headersCell.appendChild(dom.div(style({textAlign: 'right'}), b))
}
let row = {headers}
row.root=dom.tr(
dom.td(row.SMTPMailFromRegexp=dom.input(attr({value: rs.SMTPMailFromRegexp || ''}))),
dom.td(row.VerifiedDomain=dom.input(attr({value: rs.VerifiedDomain || ''}))),
headersCell,
dom.td(row.ListAllowDomain=dom.input(attr({value: rs.ListAllowDomain || ''}))),
dom.td(row.Mailbox=dom.input(attr({value: rs.Mailbox || ''}))),
dom.td(
dom.button('Remove ruleset', function click(e) {
row.root.remove()
rulesetsRows = rulesetsRows.filter(e => e !== row)
}),
),
)
rulesetsRows.push(row)
rulesetsTbody.appendChild(row.root)
}
(dest.Rulesets || []).forEach(rs => {
addRulesetsRow(rs)
})
let defaultMailbox
let saveButton
const page = document.getElementById('page')
dom._kids(page,
crumbs(
crumblink('Mox Account', '#'),
'Destination ' + name,
),
dom.div(
dom.span('Default mailbox', attr({title: 'Default mailbox where email for this recipient is delivered to if it does not match any ruleset. Default is Inbox.'})),
dom.br(),
defaultMailbox=dom.input(attr({value: dest.Mailbox, placeholder: 'Inbox'})),
),
dom.br(),
dom.h2('Rulesets'),
dom.p('Incoming messages are checked against the rulesets. If a ruleset matches, the message is delivered to the mailbox configured for the ruleset instead of to the default mailbox.'),
dom.p('The "List allow domain" does not affect the matching, but skips the regular spam checks if one of the verified domains is a (sub)domain of the domain mentioned here.'),
dom.table(
dom.thead(
dom.tr(
dom.th('SMTP "MAIL FROM" regexp', attr({title: 'Matches if this regular expression matches (a substring of) the SMTP MAIL FROM address (not the message From-header). E.g. user@example.org.'})),
dom.th('Verified domain', attr({title: 'Matches if this domain matches an SPF- and/or DKIM-verified (sub)domain.'})),
dom.th('Headers regexp', attr({title: 'Matches if these header field/value regular expressions all match (substrings of) the message headers. Header fields and values are converted to lower case before matching. Whitespace is trimmed from the value before matching. A header field can occur multiple times in a message; only one instance has to match. For mailing lists, you could match on ^list-id$ with the value typically the mailing list address in angled brackets with @ replaced with a dot, e.g. <name\\.lists\\.example\\.org>.'})),
dom.th('List allow domain', attr({title: "Influence the spam filtering, this does not change whether this ruleset applies to a message. If this domain matches an SPF- and/or DKIM-verified (sub)domain, the message is accepted without further spam checks, such as a junk filter or DMARC reject evaluation. DMARC rejects should not apply for mailing lists that are not configured to rewrite the From-header of messages that don't have a passing DKIM signature of the From-domain. Otherwise, by rejecting messages, you may be automatically unsubscribed from the mailing list. The assumption is that mailing lists do their own spam filtering/moderation."})),
dom.th('Mailbox', attr({title: 'Mailbox to deliver to if this ruleset matches.'})),
dom.th('Action'),
)
),
rulesetsTbody,
dom.tfoot(
dom.tr(
dom.td(attr({colspan: '5'})),
dom.td(
dom.button('Add ruleset', function click(e) {
addRulesetsRow({})
}),
),
),
),
),
dom.br(),
saveButton=dom.button('Save', async function click(e) {
saveButton.disabled = true
try {
const newDest = {
Mailbox: defaultMailbox.value,
Rulesets: rulesetsRows.map(row => {
return {
SMTPMailFromRegexp: row.SMTPMailFromRegexp.value,
VerifiedDomain: row.VerifiedDomain.value,
HeadersRegexp: Object.fromEntries(row.headers.map(h => [h.key.value, h.value.value])),
ListAllowDomain: row.ListAllowDomain.value,
Mailbox: row.Mailbox.value,
}
}),
}
page.classList.add('loading')
await api.DestinationSave(name, dest, newDest)
dest = newDest // Set new dest, for if user edits again. Without this, they would get an error that the config has been modified.
} catch (err) {
console.log({err})
window.alert('Error: '+err.message)
return
} finally {
saveButton.disabled = false
page.classList.remove('loading')
}
}),
)
}
const init = async () => {
let curhash
const page = document.getElementById('page')
const hashChange = async () => {
if (curhash === window.location.hash) {
return
}
let h = decodeURIComponent(window.location.hash)
if (h !== '' && h.substring(0, 1) == '#') {
h = h.substring(1)
}
const t = h.split('/')
page.classList.add('loading')
try {
if (h === '') {
await index()
} else if (t[0] === 'destinations' && t.length === 2) {
await destination(t[1])
} else {
dom._kids(page, 'page not found')
}
} catch (err) {
console.log({err})
window.alert('Error: ' + err.message)
window.location.hash = curhash
curhash = window.location.hash
return
}
curhash = window.location.hash
page.classList.remove('loading')
}
window.addEventListener('hashchange', hashChange)
hashChange()
}
window.addEventListener('load', init)
</script>
</body>
</html>
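The "Generate random password" button above draws characters with rejection sampling so the modulo step does not make some characters more likely than others. A minimal Go sketch of the same idea, illustration only and not code from the repository:

package main

import (
	"crypto/rand"
	"fmt"
)

// randomPassword returns a password of length n drawn uniformly from chars,
// rejecting random bytes at or above the largest multiple of len(chars) that
// fits in a byte, so no character is favored (the "Prevent bias" check above).
func randomPassword(n int) (string, error) {
	const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-_;:,<.>/"
	limit := 256 - 256%len(chars) // Values >= limit are rejected to avoid modulo bias.
	b := make([]byte, 1)
	s := make([]byte, 0, n)
	for len(s) < n {
		if _, err := rand.Read(b); err != nil {
			return "", err
		}
		if int(b[0]) >= limit {
			continue
		}
		s = append(s, chars[int(b[0])%len(chars)])
	}
	return string(s), nil
}

func main() {
	pw, err := randomPassword(12)
	if err != nil {
		panic(err)
	}
	fmt.Println(pw)
}

The same loop works for any alphabet whose size does not evenly divide 256.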

181
http/account_test.go Normal file
View File

@ -0,0 +1,181 @@
package http
import (
"archive/tar"
"archive/zip"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/store"
)
func tcheck(t *testing.T, err error, msg string) {
t.Helper()
if err != nil {
t.Fatalf("%s: %s", msg, err)
}
}
func TestAccount(t *testing.T) {
os.RemoveAll("../testdata/httpaccount/data")
mox.ConfigStaticPath = "../testdata/httpaccount/mox.conf"
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
mox.MustLoadConfig(false)
acc, err := store.OpenAccount("mjl")
tcheck(t, err, "open account")
defer acc.Close()
switchDone := store.Switchboard()
defer close(switchDone)
log := mlog.New("store")
test := func(authHdr string, expect string) {
t.Helper()
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/ignored", nil)
if authHdr != "" {
r.Header.Add("Authorization", authHdr)
}
ok := checkAccountAuth(context.Background(), log, w, r)
if ok != expect {
t.Fatalf("got %v, expected %v", ok, expect)
}
}
const authOK = "Basic bWpsQG1veC5leGFtcGxlOnRlc3QxMjM0" // mjl@mox.example:test1234
const authBad = "Basic bWpsQG1veC5leGFtcGxlOmJhZHBhc3N3b3Jk" // mjl@mox.example:badpassword
authCtx := context.WithValue(context.Background(), authCtxKey, "mjl")
test(authOK, "") // No password set yet.
Account{}.SetPassword(authCtx, "test1234")
test(authOK, "mjl")
test(authBad, "")
_, dests := Account{}.Destinations(authCtx)
Account{}.DestinationSave(authCtx, "mjl@mox.example", dests["mjl@mox.example"], dests["mjl@mox.example"]) // todo: save modified value and compare it afterwards
go importManage()
// Import mbox/maildir tgz/zip.
testImport := func(filename string, expect int) {
t.Helper()
var reqBody bytes.Buffer
mpw := multipart.NewWriter(&reqBody)
part, err := mpw.CreateFormFile("file", path.Base(filename))
tcheck(t, err, "creating form file")
buf, err := os.ReadFile(filename)
tcheck(t, err, "reading file")
_, err = part.Write(buf)
tcheck(t, err, "write part")
err = mpw.Close()
tcheck(t, err, "close multipart writer")
r := httptest.NewRequest("POST", "/import", &reqBody)
r.Header.Add("Content-Type", mpw.FormDataContentType())
r.Header.Add("Authorization", authOK)
w := httptest.NewRecorder()
accountHandle(w, r)
if w.Code != http.StatusOK {
t.Fatalf("import, got status code %d, expected 200: %s", w.Code, w.Body.Bytes())
}
m := map[string]string{}
if err := json.Unmarshal(w.Body.Bytes(), &m); err != nil {
t.Fatalf("parsing import response: %v", err)
}
token := m["ImportToken"]
l := importListener{token, make(chan importEvent, 100), make(chan bool)}
importers.Register <- &l
if !<-l.Register {
t.Fatalf("register failed")
}
defer func() {
importers.Unregister <- &l
}()
count := 0
loop:
for {
e := <-l.Events
switch x := e.Event.(type) {
case importCount:
count += x.Count
case importProblem:
t.Fatalf("unexpected problem: %q", x.Message)
case importDone:
break loop
case importAborted:
t.Fatalf("unexpected aborted import")
default:
panic("missing case")
}
}
if count != expect {
t.Fatalf("imported %d messages, expected %d", count, expect)
}
}
testImport("../testdata/importtest.mbox.zip", 2)
testImport("../testdata/importtest.maildir.tgz", 2)
testExport := func(httppath string, iszip bool, expectFiles int) {
t.Helper()
r := httptest.NewRequest("GET", httppath, nil)
r.Header.Add("Authorization", authOK)
w := httptest.NewRecorder()
accountHandle(w, r)
if w.Code != http.StatusOK {
t.Fatalf("export, got status code %d, expected 200: %s", w.Code, w.Body.Bytes())
}
var count int
if iszip {
buf := w.Body.Bytes()
zr, err := zip.NewReader(bytes.NewReader(buf), int64(len(buf)))
tcheck(t, err, "reading zip")
for _, f := range zr.File {
if !strings.HasSuffix(f.Name, "/") {
count++
}
}
} else {
gzr, err := gzip.NewReader(w.Body)
tcheck(t, err, "gzip reader")
tr := tar.NewReader(gzr)
for {
h, err := tr.Next()
if err == io.EOF {
break
}
tcheck(t, err, "next file in tar")
if !strings.HasSuffix(h.Name, "/") {
count++
}
_, err = io.Copy(io.Discard, tr)
tcheck(t, err, "reading from tar")
}
}
if count != expectFiles {
t.Fatalf("export, has %d files, expected %d", count, expectFiles)
}
}
testExport("/mail-export-maildir.tgz", false, 6) // 2 mailboxes, each with 2 messages and a dovecot-keyword file
testExport("/mail-export-maildir.zip", true, 6)
testExport("/mail-export-mbox.tgz", false, 2)
testExport("/mail-export-mbox.zip", true, 2)
}

181
http/accountapi.json Normal file
View File

@ -0,0 +1,181 @@
{
"Name": "Account",
"Docs": "Account exports web API functions for the account web interface. All its\nmethods are exported under api/. Function calls require valid HTTP\nAuthentication credentials of a user.",
"Functions": [
{
"Name": "SetPassword",
"Docs": "SetPassword saves a new password for the account, invalidating the previous password.\nSessions are not interrupted, and will keep working. New login attempts must use the new password.\nPassword must be at least 8 characters.",
"Params": [
{
"Name": "password",
"Typewords": [
"string"
]
}
],
"Returns": []
},
{
"Name": "Destinations",
"Docs": "Destinations returns the default domain, and the destinations (keys are email\naddresses, or localparts to the default domain).\ntodo: replace with a function that returns the whole account, when sherpadoc understands unnamed struct fields.",
"Params": [],
"Returns": [
{
"Name": "r0",
"Typewords": [
"Domain"
]
},
{
"Name": "r1",
"Typewords": [
"{}",
"Destination"
]
}
]
},
{
"Name": "DestinationSave",
"Docs": "DestinationSave updates a destination.\nOldDest is compared against the current destination. If it does not match, an\nerror is returned. Otherwise newDest is saved and the configuration reloaded.",
"Params": [
{
"Name": "destName",
"Typewords": [
"string"
]
},
{
"Name": "oldDest",
"Typewords": [
"Destination"
]
},
{
"Name": "newDest",
"Typewords": [
"Destination"
]
}
],
"Returns": []
},
{
"Name": "ImportAbort",
"Docs": "ImportAbort aborts an import that is in progress. If the import exists and isn't\nfinished, no changes will have been made by the import.",
"Params": [
{
"Name": "importToken",
"Typewords": [
"string"
]
}
],
"Returns": []
}
],
"Sections": [],
"Structs": [
{
"Name": "Domain",
"Docs": "Domain is a domain name, with one or more labels, with at least an ASCII\nrepresentation, and for IDNA non-ASCII domains a unicode representation.\nThe ASCII string must be used for DNS lookups.",
"Fields": [
{
"Name": "ASCII",
"Docs": "A non-unicode domain, e.g. with A-labels (xn--...) or NR-LDH (non-reserved letters/digits/hyphens) labels. Always in lower case.",
"Typewords": [
"string"
]
},
{
"Name": "Unicode",
"Docs": "Name as U-labels. Empty if this is an ASCII-only domain.",
"Typewords": [
"string"
]
}
]
},
{
"Name": "Destination",
"Docs": "",
"Fields": [
{
"Name": "Mailbox",
"Docs": "",
"Typewords": [
"string"
]
},
{
"Name": "Rulesets",
"Docs": "",
"Typewords": [
"[]",
"Ruleset"
]
}
]
},
{
"Name": "Ruleset",
"Docs": "",
"Fields": [
{
"Name": "SMTPMailFromRegexp",
"Docs": "",
"Typewords": [
"string"
]
},
{
"Name": "VerifiedDomain",
"Docs": "",
"Typewords": [
"string"
]
},
{
"Name": "HeadersRegexp",
"Docs": "",
"Typewords": [
"{}",
"string"
]
},
{
"Name": "ListAllowDomain",
"Docs": "",
"Typewords": [
"string"
]
},
{
"Name": "Mailbox",
"Docs": "",
"Typewords": [
"string"
]
},
{
"Name": "VerifiedDNSDomain",
"Docs": "",
"Typewords": [
"Domain"
]
},
{
"Name": "ListAllowDNSDomain",
"Docs": "",
"Typewords": [
"Domain"
]
}
]
}
],
"Ints": [],
"Strings": [],
"SherpaVersion": 0,
"SherpadocVersion": 1
}

1634
http/admin.go Normal file

File diff suppressed because it is too large

2214
http/admin.html Normal file

File diff suppressed because it is too large

133
http/admin_test.go Normal file
View File

@ -0,0 +1,133 @@
package http
import (
"context"
"crypto/ed25519"
"net"
"net/http/httptest"
"os"
"testing"
"time"
"golang.org/x/crypto/bcrypt"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mox-"
)
func init() {
mox.LimitersInit()
}
func TestAdminAuth(t *testing.T) {
test := func(passwordfile, authHdr string, expect bool) {
t.Helper()
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/ignored", nil)
if authHdr != "" {
r.Header.Add("Authorization", authHdr)
}
ok := checkAdminAuth(context.Background(), passwordfile, w, r)
if ok != expect {
t.Fatalf("got %v, expected %v", ok, expect)
}
}
const authOK = "Basic YWRtaW46bW94dGVzdDEyMw==" // admin:moxtest123
const authBad = "Basic YWRtaW46YmFkcGFzc3dvcmQ=" // admin:badpassword
const path = "../testdata/http-passwordfile"
os.Remove(path)
defer os.Remove(path)
test(path, authOK, false) // Password file does not exist.
adminpwhash, err := bcrypt.GenerateFromPassword([]byte("moxtest123"), bcrypt.DefaultCost)
if err != nil {
t.Fatalf("generate bcrypt hash: %v", err)
}
if err := os.WriteFile(path, adminpwhash, 0660); err != nil {
t.Fatalf("write password file: %v", err)
}
// We loop to also exercise the auth cache.
for i := 0; i < 2; i++ {
test(path, "", false) // Empty/missing header.
test(path, "Malformed ", false) // Not "Basic"
test(path, "Basic malformed ", false) // Bad base64.
test(path, "Basic dGVzdA== ", false) // base64 is ok, but wrong tokens inside.
test(path, authBad, false) // Wrong password.
test(path, authOK, true)
}
}
func TestCheckDomain(t *testing.T) {
// NOTE: we aren't currently looking at the results; having the code paths executed is better than nothing.
resolver := dns.MockResolver{
MX: map[string][]*net.MX{
"mox.example.": {{Host: "mail.mox.example.", Pref: 10}},
},
A: map[string][]string{
"mail.mox.example.": {"127.0.0.2"},
},
AAAA: map[string][]string{
"mail.mox.example.": {"127.0.0.2"},
},
TXT: map[string][]string{
"mox.example.": {"v=spf1 mx -all"},
"test._domainkey.mox.example.": {"v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504="},
"_dmarc.mox.example.": {"v=DMARC1; p=reject; rua=mailto:mjl@mox.example"},
"_smtp._tls.mox.example": {"v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;"},
"_mta-sts.mox.example": {"v=STSv1; id=20160831085700Z"},
},
CNAME: map[string]string{},
}
listener := config.Listener{
IPs: []string{"127.0.0.2"},
Hostname: "mox.example",
HostnameDomain: dns.Domain{ASCII: "mox.example"},
}
listener.SMTP.Enabled = true
listener.AutoconfigHTTPS.Enabled = true
listener.MTASTSHTTPS.Enabled = true
mox.Conf.Static.Listeners = map[string]config.Listener{
"public": listener,
}
domain := config.Domain{
DKIM: config.DKIM{
Selectors: map[string]config.Selector{
"test": {
HashEffective: "sha256",
HeadersEffective: []string{"From", "Date", "Subject"},
Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code.
Domain: dns.Domain{ASCII: "test"},
},
"missing": {
HashEffective: "sha256",
HeadersEffective: []string{"From", "Date", "Subject"},
Key: ed25519.NewKeyFromSeed(make([]byte, 32)), // warning: fake zero key, do not copy this code.
Domain: dns.Domain{ASCII: "missing"},
},
},
Sign: []string{"test", "test2"},
},
}
mox.Conf.Dynamic.Domains = map[string]config.Domain{
"mox.example": domain,
}
// Make a dialer that fails immediately before actually connecting.
done := make(chan struct{})
close(done)
dialer := &net.Dialer{Deadline: time.Now().Add(-time.Second), Cancel: done}
checkDomain(context.Background(), resolver, dialer, "mox.example")
// todo: check returned data
Admin{}.Domains(context.Background()) // todo: check results
dnsblsStatus(context.Background(), resolver) // todo: check results
}
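
The MockResolver above corresponds roughly to the following DNS records (zone-file style, shown here only to illustrate what checkDomain exercises: MX, SPF, DKIM, DMARC, TLS reporting and MTA-STS):

mox.example.                  MX   10 mail.mox.example.
mail.mox.example.             A    127.0.0.2
mox.example.                  TXT  "v=spf1 mx -all"
test._domainkey.mox.example.  TXT  "v=DKIM1;h=sha256;k=ed25519;p=ln5zd/JEX4Jy60WAhUOv33IYm2YZMyTQAdr9stML504="
_dmarc.mox.example.           TXT  "v=DMARC1; p=reject; rua=mailto:mjl@mox.example"
_smtp._tls.mox.example.       TXT  "v=TLSRPTv1; rua=mailto:tlsrpt@mox.example;"
_mta-sts.mox.example.         TXT  "v=STSv1; id=20160831085700Z"
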

3501
http/adminapi.json Normal file

File diff suppressed because it is too large.

View File

@ -1,16 +0,0 @@
//go:build !netbsd && !freebsd && !darwin && !windows
package http
import (
"fmt"
"syscall"
)
func statAtime(sys any) (int64, error) {
x, ok := sys.(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("sys is a %T, expected *syscall.Stat_t", sys)
}
return int64(x.Atim.Sec)*1000*1000*1000 + int64(x.Atim.Nsec), nil
}

View File

@ -1,16 +0,0 @@
//go:build netbsd || freebsd || darwin
package http
import (
"fmt"
"syscall"
)
func statAtime(sys any) (int64, error) {
x, ok := sys.(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("stat sys is a %T, expected *syscall.Stat_t", sys)
}
return int64(x.Atimespec.Sec)*1000*1000*1000 + int64(x.Atimespec.Nsec), nil
}

View File

@ -1,16 +0,0 @@
//go:build windows
package http
import (
"fmt"
"syscall"
)
func statAtime(sys any) (int64, error) {
x, ok := sys.(*syscall.Win32FileAttributeData)
if !ok {
return 0, fmt.Errorf("sys is a %T, expected *syscall.Win32FileAttributeData", sys)
}
return x.LastAccessTime.Nanoseconds(), nil
}
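
All three build-tagged variants above expose the same statAtime helper. A hedged sketch of a caller (hypothetical, assuming it sits alongside these files in package http), turning the returned nanoseconds into a time.Time:

package http

import (
	"os"
	"time"
)

// exampleAtime is illustrative only: stat a file and convert the
// platform-specific access time from statAtime into a time.Time.
func exampleAtime(path string) (time.Time, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return time.Time{}, err
	}
	ns, err := statAtime(fi.Sys())
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(0, ns), nil
}
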

View File

@ -3,16 +3,14 @@ package http
import ( import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"log/slog"
"net/http" "net/http"
"strings"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"rsc.io/qr"
"github.com/mjl-/mox/admin" "github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/smtp" "github.com/mjl-/mox/smtp"
) )
@ -38,9 +36,7 @@ var (
// - Thunderbird will request an "autoconfig" xml file. // - Thunderbird will request an "autoconfig" xml file.
// - Microsoft tools will request an "autodiscovery" xml file. // - Microsoft tools will request an "autodiscovery" xml file.
// - In my tests on an internal domain, iOS mail only talks to Apple servers, then // - In my tests on an internal domain, iOS mail only talks to Apple servers, then
// does not attempt autoconfiguration. Possibly due to them being private DNS // does not attempt autoconfiguration. Possibly due to them being private DNS names.
// names. Apple software can be provisioned with "mobileconfig" profile files,
// which users can download after logging in.
// //
// DNS records seem optional, but autoconfig.<domain> and autodiscover.<domain> // DNS records seem optional, but autoconfig.<domain> and autodiscover.<domain>
// (both CNAME or A) are useful, and so is SRV _autodiscovery._tcp.<domain> 0 0 443 // (both CNAME or A) are useful, and so is SRV _autodiscovery._tcp.<domain> 0 0 443
@ -56,7 +52,7 @@ var (
// User should create a DNS record: autoconfig.<domain> (CNAME or A). // User should create a DNS record: autoconfig.<domain> (CNAME or A).
// See https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat // See https://wiki.mozilla.org/Thunderbird:Autoconfiguration:ConfigFileFormat
func autoconfHandle(w http.ResponseWriter, r *http.Request) { func autoconfHandle(w http.ResponseWriter, r *http.Request) {
log := pkglog.WithContext(r.Context()) log := xlog.WithContext(r.Context())
var addrDom string var addrDom string
defer func() { defer func() {
@ -64,123 +60,99 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
}() }()
email := r.FormValue("emailaddress") email := r.FormValue("emailaddress")
log.Debug("autoconfig request", slog.String("email", email)) log.Debug("autoconfig request", mlog.Field("email", email))
var domain dns.Domain addr, err := smtp.ParseAddress(email)
if email == "" {
email = "%EMAILADDRESS%"
// Declare this here rather than using := to avoid shadowing domain from
// the outer scope.
var err error
domain, err = dns.ParseDomain(r.Host)
if err != nil {
http.Error(w, fmt.Sprintf("400 - bad request - invalid domain: %s", r.Host), http.StatusBadRequest)
return
}
domain.ASCII = strings.TrimPrefix(domain.ASCII, "autoconfig.")
domain.Unicode = strings.TrimPrefix(domain.Unicode, "autoconfig.")
} else {
addr, err := smtp.ParseAddress(email)
if err != nil {
http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
return
}
domain = addr.Domain
}
socketType := func(tlsMode admin.TLSMode) (string, error) {
switch tlsMode {
case admin.TLSModeImmediate:
return "SSL", nil
case admin.TLSModeSTARTTLS:
return "STARTTLS", nil
case admin.TLSModeNone:
return "plain", nil
default:
return "", fmt.Errorf("unknown tls mode %v", tlsMode)
}
}
var imapTLS, submissionTLS string
config, err := admin.ClientConfigDomain(domain)
if err == nil {
imapTLS, err = socketType(config.IMAP.TLSMode)
}
if err == nil {
submissionTLS, err = socketType(config.Submission.TLSMode)
}
if err != nil { if err != nil {
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest) http.Error(w, "400 - bad request - invalid parameter emailaddress", http.StatusBadRequest)
return return
} }
if _, ok := mox.Conf.Domain(addr.Domain); !ok {
http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest)
return
}
addrDom = addr.Domain.Name()
hostname := mox.Conf.Static.HostnameDomain
// Thunderbird doesn't seem to allow U-labels, always return ASCII names. // Thunderbird doesn't seem to allow U-labels, always return ASCII names.
var resp autoconfigResponse var resp autoconfigResponse
resp.Version = "1.1" resp.Version = "1.1"
resp.EmailProvider.ID = domain.ASCII resp.EmailProvider.ID = addr.Domain.ASCII
resp.EmailProvider.Domain = domain.ASCII resp.EmailProvider.Domain = addr.Domain.ASCII
resp.EmailProvider.DisplayName = email resp.EmailProvider.DisplayName = email
resp.EmailProvider.DisplayShortName = domain.ASCII resp.EmailProvider.DisplayShortName = addr.Domain.ASCII
var imapPort int
var imapSocket string
for _, l := range mox.Conf.Static.Listeners {
if l.IMAPS.Enabled {
imapSocket = "SSL"
imapPort = config.Port(l.IMAPS.Port, 993)
} else if l.IMAP.Enabled {
if l.TLS != nil && imapSocket != "SSL" {
imapSocket = "STARTTLS"
imapPort = config.Port(l.IMAP.Port, 143)
} else if imapSocket == "" {
imapSocket = "plain"
imapPort = config.Port(l.IMAP.Port, 143)
}
}
}
if imapPort == 0 {
log.Error("autoconfig: no imap configured?")
}
// todo: specify SCRAM-SHA-256 once thunderbird and autoconfig supports it. or perhaps that will fall under "password-encrypted" by then. // todo: specify SCRAM-SHA-256 once thunderbird and autoconfig supports it. or perhaps that will fall under "password-encrypted" by then.
// todo: let user configure they prefer or require tls client auth and specify "TLS-client-cert"
incoming := incomingServer{ resp.EmailProvider.IncomingServer.Type = "imap"
"imap", resp.EmailProvider.IncomingServer.Hostname = hostname.ASCII
config.IMAP.Host.ASCII, resp.EmailProvider.IncomingServer.Port = imapPort
config.IMAP.Port, resp.EmailProvider.IncomingServer.SocketType = imapSocket
imapTLS, resp.EmailProvider.IncomingServer.Username = email
email, resp.EmailProvider.IncomingServer.Authentication = "password-encrypted"
"password-encrypted",
} var smtpPort int
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incoming) var smtpSocket string
if config.IMAP.EnabledOnHTTPS { for _, l := range mox.Conf.Static.Listeners {
tlsMode, _ := socketType(admin.TLSModeImmediate) if l.Submissions.Enabled {
incomingALPN := incomingServer{ smtpSocket = "SSL"
"imap", smtpPort = config.Port(l.Submissions.Port, 465)
config.IMAP.Host.ASCII, } else if l.Submission.Enabled {
443, if l.TLS != nil && smtpSocket != "SSL" {
tlsMode, smtpSocket = "STARTTLS"
email, smtpPort = config.Port(l.Submission.Port, 587)
"password-encrypted", } else if smtpSocket == "" {
smtpSocket = "plain"
smtpPort = config.Port(l.Submission.Port, 587)
}
} }
resp.EmailProvider.IncomingServers = append(resp.EmailProvider.IncomingServers, incomingALPN) }
if smtpPort == 0 {
log.Error("autoconfig: no smtp submission configured?")
} }
outgoing := outgoingServer{ resp.EmailProvider.OutgoingServer.Type = "smtp"
"smtp", resp.EmailProvider.OutgoingServer.Hostname = hostname.ASCII
config.Submission.Host.ASCII, resp.EmailProvider.OutgoingServer.Port = smtpPort
config.Submission.Port, resp.EmailProvider.OutgoingServer.SocketType = smtpSocket
submissionTLS, resp.EmailProvider.OutgoingServer.Username = email
email, resp.EmailProvider.OutgoingServer.Authentication = "password-encrypted"
"password-encrypted",
}
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoing)
if config.Submission.EnabledOnHTTPS {
tlsMode, _ := socketType(admin.TLSModeImmediate)
outgoingALPN := outgoingServer{
"smtp",
config.Submission.Host.ASCII,
443,
tlsMode,
email,
"password-encrypted",
}
resp.EmailProvider.OutgoingServers = append(resp.EmailProvider.OutgoingServers, outgoingALPN)
}
// todo: should we put the email address in the URL? // todo: should we put the email address in the URL?
resp.ClientConfigUpdate.URL = fmt.Sprintf("https://autoconfig.%s/mail/config-v1.1.xml", domain.ASCII) resp.ClientConfigUpdate.URL = fmt.Sprintf("https://%s/mail/config-v1.1.xml", hostname.ASCII)
w.Header().Set("Content-Type", "application/xml; charset=utf-8") w.Header().Set("Content-Type", "application/xml; charset=utf-8")
enc := xml.NewEncoder(w) enc := xml.NewEncoder(w)
enc.Indent("", "\t") enc.Indent("", "\t")
fmt.Fprint(w, xml.Header) fmt.Fprint(w, xml.Header)
err = enc.Encode(resp) if err := enc.Encode(resp); err != nil {
log.Check(err, "write autoconfig xml response") log.Errorx("marshal autoconfig response", err)
}
} }
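
For reference, a response from this handler has roughly the following shape (values are illustrative; element names follow the autoconfigResponse struct further down, and the ALPN-on-443 servers and clientConfigUpdate element are omitted here):

<?xml version="1.0" encoding="UTF-8"?>
<clientConfig version="1.1">
	<emailProvider id="example.org">
		<domain>example.org</domain>
		<displayName>user@example.org</displayName>
		<displayShortName>example.org</displayShortName>
		<incomingServer type="imap">
			<hostname>mail.example.org</hostname>
			<port>993</port>
			<socketType>SSL</socketType>
			<username>user@example.org</username>
			<authentication>password-encrypted</authentication>
		</incomingServer>
		<outgoingServer type="smtp">
			<hostname>mail.example.org</hostname>
			<port>465</port>
			<socketType>SSL</socketType>
			<username>user@example.org</username>
			<authentication>password-encrypted</authentication>
		</outgoingServer>
	</emailProvider>
</clientConfig>
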
// Autodiscover from Microsoft, also used by Thunderbird. // Autodiscover from Microsoft, also used by Thunderbird.
// User should create a DNS record: _autodiscover._tcp.<domain> SRV 0 0 443 <hostname> // User should create a DNS record: _autodiscover._tcp.<domain> IN SRV 0 0 443 <hostname or autodiscover.<domain>>
// //
// In practice, autodiscover does not seem to work wit microsoft clients. A // In practice, autodiscover does not seem to work wit microsoft clients. A
// connectivity test tool for outlook is available on // connectivity test tool for outlook is available on
@ -190,7 +162,7 @@ func autoconfHandle(w http.ResponseWriter, r *http.Request) {
// //
// Thunderbird does understand autodiscover. // Thunderbird does understand autodiscover.
func autodiscoverHandle(w http.ResponseWriter, r *http.Request) { func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
log := pkglog.WithContext(r.Context()) log := xlog.WithContext(r.Context())
var addrDom string var addrDom string
defer func() { defer func() {
@ -208,7 +180,7 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
return return
} }
log.Debug("autodiscover request", slog.String("email", req.Request.EmailAddress)) log.Debug("autodiscover request", mlog.Field("email", req.Request.EmailAddress))
addr, err := smtp.ParseAddress(req.Request.EmailAddress) addr, err := smtp.ParseAddress(req.Request.EmailAddress)
if err != nil { if err != nil {
@ -216,33 +188,13 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
return return
} }
// tlsmode returns the "ssl" and "encryption" fields. if _, ok := mox.Conf.Domain(addr.Domain); !ok {
tlsmode := func(tlsMode admin.TLSMode) (string, string, error) { http.Error(w, "400 - bad request - unknown domain", http.StatusBadRequest)
switch tlsMode {
case admin.TLSModeImmediate:
return "on", "TLS", nil
case admin.TLSModeSTARTTLS:
return "on", "", nil
case admin.TLSModeNone:
return "off", "", nil
default:
return "", "", fmt.Errorf("unknown tls mode %v", tlsMode)
}
}
var imapSSL, imapEncryption string
var submissionSSL, submissionEncryption string
config, err := admin.ClientConfigDomain(addr.Domain)
if err == nil {
imapSSL, imapEncryption, err = tlsmode(config.IMAP.TLSMode)
}
if err == nil {
submissionSSL, submissionEncryption, err = tlsmode(config.Submission.TLSMode)
}
if err != nil {
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest)
return return
} }
addrDom = addr.Domain.Name()
hostname := mox.Conf.Static.HostnameDomain
// The docs are generated and fragmented in many tiny pages, hard to follow. // The docs are generated and fragmented in many tiny pages, hard to follow.
// High-level starting point, https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/78530279-d042-4eb0-a1f4-03b18143cd19 // High-level starting point, https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/78530279-d042-4eb0-a1f4-03b18143cd19
@ -253,9 +205,48 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
// use. See // use. See
// https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726 // https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726
w.Header().Set("Content-Type", "application/xml; charset=utf-8") var imapPort int
imapSSL := "off"
var imapEncryption string
// todo: let user configure they prefer or require tls client auth and add "AuthPackage" with value "certificate" to Protocol? see https://learn.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxdscli/21fd2dd5-c4ee-485b-94fb-e7db5da93726 var smtpPort int
smtpSSL := "off"
var smtpEncryption string
for _, l := range mox.Conf.Static.Listeners {
if l.IMAPS.Enabled {
imapPort = config.Port(l.IMAPS.Port, 993)
imapSSL = "on"
imapEncryption = "TLS" // Assuming this means direct TLS.
} else if l.IMAP.Enabled {
if l.TLS != nil && imapEncryption != "TLS" {
imapSSL = "on"
imapPort = config.Port(l.IMAP.Port, 143)
} else if imapSSL == "" {
imapPort = config.Port(l.IMAP.Port, 143)
}
}
if l.Submissions.Enabled {
smtpPort = config.Port(l.Submissions.Port, 465)
smtpSSL = "on"
smtpEncryption = "TLS" // Assuming this means direct TLS.
} else if l.Submission.Enabled {
if l.TLS != nil && smtpEncryption != "TLS" {
smtpSSL = "on"
smtpPort = config.Port(l.Submission.Port, 587)
} else if smtpSSL == "" {
smtpPort = config.Port(l.Submission.Port, 587)
}
}
}
if imapPort == 0 {
log.Error("autoconfig: no smtp submission configured?")
}
if smtpPort == 0 {
log.Error("autoconfig: no imap configured?")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
resp := autodiscoverResponse{} resp := autodiscoverResponse{}
resp.XMLName.Local = "Autodiscover" resp.XMLName.Local = "Autodiscover"
@ -268,8 +259,8 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
Protocol: []autodiscoverProtocol{ Protocol: []autodiscoverProtocol{
{ {
Type: "IMAP", Type: "IMAP",
Server: config.IMAP.Host.ASCII, Server: hostname.ASCII,
Port: config.IMAP.Port, Port: imapPort,
LoginName: req.Request.EmailAddress, LoginName: req.Request.EmailAddress,
SSL: imapSSL, SSL: imapSSL,
Encryption: imapEncryption, Encryption: imapEncryption,
@ -278,11 +269,11 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
}, },
{ {
Type: "SMTP", Type: "SMTP",
Server: config.Submission.Host.ASCII, Server: hostname.ASCII,
Port: config.Submission.Port, Port: smtpPort,
LoginName: req.Request.EmailAddress, LoginName: req.Request.EmailAddress,
SSL: submissionSSL, SSL: smtpSSL,
Encryption: submissionEncryption, Encryption: smtpEncryption,
SPA: "off", // Override default "on", this is Microsofts proprietary authentication protocol. SPA: "off", // Override default "on", this is Microsofts proprietary authentication protocol.
AuthRequired: "on", AuthRequired: "on",
}, },
@ -291,8 +282,9 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
enc := xml.NewEncoder(w) enc := xml.NewEncoder(w)
enc.Indent("", "\t") enc.Indent("", "\t")
fmt.Fprint(w, xml.Header) fmt.Fprint(w, xml.Header)
err = enc.Encode(resp) if err := enc.Encode(resp); err != nil {
log.Check(err, "marshal autodiscover xml response") log.Errorx("marshal autodiscover response", err)
}
} }
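
As the comment above notes, clients find this endpoint through DNS. A concrete (illustrative) record and the URL a client such as Thunderbird or Outlook then requests:

_autodiscover._tcp.example.org. IN SRV 0 0 443 mail.example.org.

The client POSTs its request XML to https://mail.example.org/autodiscover/autodiscover.xml (or to autodiscover.<domain>/autodiscover/autodiscover.xml when no SRV record exists).
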
// Thunderbird requests these URLs for autoconfig/autodiscover: // Thunderbird requests these URLs for autoconfig/autodiscover:
@ -300,22 +292,6 @@ func autodiscoverHandle(w http.ResponseWriter, r *http.Request) {
// https://autodiscover.example.org/autodiscover/autodiscover.xml // https://autodiscover.example.org/autodiscover/autodiscover.xml
// https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org // https://example.org/.well-known/autoconfig/mail/config-v1.1.xml?emailaddress=user%40example.org
// https://example.org/autodiscover/autodiscover.xml // https://example.org/autodiscover/autodiscover.xml
type incomingServer struct {
Type string `xml:"type,attr"`
Hostname string `xml:"hostname"`
Port int `xml:"port"`
SocketType string `xml:"socketType"`
Username string `xml:"username"`
Authentication string `xml:"authentication"`
}
type outgoingServer struct {
Type string `xml:"type,attr"`
Hostname string `xml:"hostname"`
Port int `xml:"port"`
SocketType string `xml:"socketType"`
Username string `xml:"username"`
Authentication string `xml:"authentication"`
}
type autoconfigResponse struct { type autoconfigResponse struct {
XMLName xml.Name `xml:"clientConfig"` XMLName xml.Name `xml:"clientConfig"`
Version string `xml:"version,attr"` Version string `xml:"version,attr"`
@ -326,8 +302,23 @@ type autoconfigResponse struct {
DisplayName string `xml:"displayName"` DisplayName string `xml:"displayName"`
DisplayShortName string `xml:"displayShortName"` DisplayShortName string `xml:"displayShortName"`
IncomingServers []incomingServer `xml:"incomingServer"` IncomingServer struct {
OutgoingServers []outgoingServer `xml:"outgoingServer"` Type string `xml:"type,attr"`
Hostname string `xml:"hostname"`
Port int `xml:"port"`
SocketType string `xml:"socketType"`
Username string `xml:"username"`
Authentication string `xml:"authentication"`
} `xml:"incomingServer"`
OutgoingServer struct {
Type string `xml:"type,attr"`
Hostname string `xml:"hostname"`
Port int `xml:"port"`
SocketType string `xml:"socketType"`
Username string `xml:"username"`
Authentication string `xml:"authentication"`
} `xml:"outgoingServer"`
} `xml:"emailProvider"` } `xml:"emailProvider"`
ClientConfigUpdate struct { ClientConfigUpdate struct {
@ -369,72 +360,3 @@ type autodiscoverProtocol struct {
SPA string SPA string
AuthRequired string AuthRequired string
} }
// Serve a .mobileconfig file. This endpoint is not a standard place where Apple
// devices look. We point to it from the account page.
func mobileconfigHandle(w http.ResponseWriter, r *http.Request) {
log := pkglog.WithContext(r.Context())
if r.Method != "GET" {
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
return
}
addresses := r.FormValue("addresses")
fullName := r.FormValue("name")
var buf []byte
var err error
if addresses == "" {
err = fmt.Errorf("missing/empty field addresses")
}
l := strings.Split(addresses, ",")
if err == nil {
buf, err = MobileConfig(l, fullName)
}
if err != nil {
http.Error(w, "400 - bad request - "+err.Error(), http.StatusBadRequest)
return
}
h := w.Header()
filename := l[0]
filename = strings.ReplaceAll(filename, ".", "-")
filename = strings.ReplaceAll(filename, "@", "-at-")
filename = "email-account-" + filename + ".mobileconfig"
h.Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
_, err = w.Write(buf)
log.Check(err, "writing mobileconfig response")
}
// Serve a png file with qrcode with the link to the .mobileconfig file, should be
// helpful for mobile devices.
func mobileconfigQRCodeHandle(w http.ResponseWriter, r *http.Request) {
log := pkglog.WithContext(r.Context())
if r.Method != "GET" {
http.Error(w, "405 - method not allowed - get required", http.StatusMethodNotAllowed)
return
}
if !strings.HasSuffix(r.URL.Path, ".qrcode.png") {
http.NotFound(w, r)
return
}
// Compose URL, scheme and host are not set.
u := *r.URL
if r.TLS == nil {
u.Scheme = "http"
} else {
u.Scheme = "https"
}
u.Host = r.Host
u.Path = strings.TrimSuffix(u.Path, ".qrcode.png")
code, err := qr.Encode(u.String(), qr.L)
if err != nil {
http.Error(w, "500 - internal server error - generating qr-code: "+err.Error(), http.StatusInternalServerError)
return
}
h := w.Header()
h.Set("Content-Type", "image/png")
_, err = w.Write(code.PNG())
log.Check(err, "writing mobileconfig qr code")
}
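
As a worked example of the filename logic above: a request with addresses=user@example.org is served with Content-Disposition filename email-account-user-at-example-org.mobileconfig (dots become dashes, @ becomes -at-), and requesting the same URL with .qrcode.png appended returns a PNG QR code that encodes the profile URL itself.
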

Binary file not shown. (Before: 823 B)

View File

@ -1,429 +0,0 @@
package http
import (
"compress/gzip"
"encoding/base64"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/mjl-/mox/mlog"
)
// todo: consider caching gzipped responses from forward handlers too. we would need to read the responses (handle up to perhaps 2mb), hash the data (blake2b seems fast), check if we have the gzip content for that hash, cache it on second request. keep around entries for non-yet-cached hashes, with some limit and lru eviction policy. we have to recognize some content-types as not applicable and do direct streaming compression, e.g. for text/event-stream. and we need to detect when backend server could be slowly sending out data and abort the caching attempt. downside is always that we need to read the whole response before and hash it before we can send our response. it is best if the backend just responds with gzip itself though. compression needs more cpu than hashing (at least 10x), but it's only worth it with enough hits.
// Cache for gzipped static files.
var staticgzcache gzcache
type gzcache struct {
dir string // Where all files are stored.
// Max total size of combined files in cache. When adding a new entry, the least
// recently used entries are evicted to stay below this size.
maxSize int64
sync.Mutex
// Total on-disk size of compressed data. Not larger than maxSize. We can
// temporarily have more bytes in use because while/after evicting, a writer may
// still have the old removed file open.
size int64
// Indexed by effective path, based on handler.
paths map[string]gzfile
// Only with files we completed compressing, kept ordered by atime. We evict from
// oldest. On use, we take entries out and put them at newest.
oldest, newest *pathUse
}
type gzfile struct {
// Whether compressing in progress. If a new request comes in while we are already
// compressing, for simplicity of code we just compress again for that client.
compressing bool
mtime int64 // If mtime changes, we remove entry from cache.
atime int64 // For LRU.
gzsize int64 // Compressed size, used in Content-Length header.
use *pathUse // Only set after compressing finished.
}
type pathUse struct {
prev, next *pathUse // Double-linked list.
path string
}
// Initialize staticgzcache from on-disk directory.
// The path and mtime are in the filename, the atime is in the file itself.
func loadStaticGzipCache(dir string, maxSize int64) {
staticgzcache = gzcache{
dir: dir,
maxSize: maxSize,
paths: map[string]gzfile{},
}
// todo future: should we split cached files in sub directories, so we don't end up with one huge directory?
os.MkdirAll(dir, 0700)
entries, err := os.ReadDir(dir)
if err != nil && !os.IsNotExist(err) {
pkglog.Errorx("listing static gzip cache files", err, slog.String("dir", dir))
}
for _, e := range entries {
name := e.Name()
var err error
if !strings.HasSuffix(name, ".gz") {
err = errors.New("missing .gz suffix")
}
var path, xpath, mtimestr string
if err == nil {
var ok bool
xpath, mtimestr, ok = strings.Cut(strings.TrimRight(name, ".gz"), "+")
if !ok {
err = fmt.Errorf("missing + in filename")
}
}
if err == nil {
var pathbuf []byte
pathbuf, err = base64.RawURLEncoding.DecodeString(xpath)
if err == nil {
path = string(pathbuf)
}
}
var mtime int64
if err == nil {
mtime, err = strconv.ParseInt(mtimestr, 16, 64)
}
var fi fs.FileInfo
if err == nil {
fi, err = e.Info()
}
var atime int64
if err == nil {
atime, err = statAtime(fi.Sys())
}
if err != nil {
pkglog.Infox("removing unusable/unrecognized file in static gzip cache dir", err)
xerr := os.Remove(filepath.Join(dir, name))
pkglog.Check(xerr, "removing unusable file in static gzip cache dir",
slog.Any("error", err),
slog.String("dir", dir),
slog.String("filename", name))
continue
}
staticgzcache.paths[path] = gzfile{
mtime: mtime,
atime: atime,
gzsize: fi.Size(),
use: &pathUse{path: path},
}
staticgzcache.size += fi.Size()
}
pathatimes := make([]struct {
path string
atime int64
}, len(staticgzcache.paths))
i := 0
for k, gf := range staticgzcache.paths {
pathatimes[i].path = k
pathatimes[i].atime = gf.atime
i++
}
sort.Slice(pathatimes, func(i, j int) bool {
return pathatimes[i].atime < pathatimes[j].atime
})
for _, pa := range pathatimes {
staticgzcache.push(staticgzcache.paths[pa.path].use)
}
// Ensure cache size is OK for current config.
staticgzcache.evictFor(0)
}
// Evict entries so size bytes are available.
// Must be called with lock held.
func (c *gzcache) evictFor(size int64) {
for c.size+size > c.maxSize && c.oldest != nil {
c.evictPath(c.oldest.path)
}
}
// remove path from cache.
// Must be called with lock held.
func (c *gzcache) evictPath(path string) {
gf := c.paths[path]
delete(c.paths, path)
c.unlink(gf.use)
c.size -= gf.gzsize
err := os.Remove(staticCachePath(c.dir, path, gf.mtime))
pkglog.Check(err, "removing cached gzipped static file", slog.String("path", path))
}
// Open cached file for path, requiring it has mtime. If there is no usable cached
// file, a nil file is returned and the caller should compress and add to the cache
// with startPath and finishPath. No usable cached file means the path isn't in the
// cache, or its mtime is different, or there is an entry but it is new and being
// compressed at the moment. If a usable cached file was found, it is opened and
// returned, along with its compressed/on-disk size.
func (c *gzcache) openPath(path string, mtime int64) (*os.File, int64) {
c.Lock()
defer c.Unlock()
gf, ok := c.paths[path]
if !ok || gf.compressing {
return nil, 0
}
if gf.mtime != mtime {
// File has changed, remove old entry. Caller will add to cache again.
c.evictPath(path)
return nil, 0
}
p := staticCachePath(c.dir, path, gf.mtime)
f, err := os.Open(p)
if err != nil {
pkglog.Errorx("open static cached gzip file, removing from cache", err, slog.String("path", path))
// Perhaps someone removed the file? Remove from cache, it will be recreated.
c.evictPath(path)
return nil, 0
}
gf.atime = time.Now().UnixNano()
c.unlink(gf.use)
c.push(gf.use)
c.paths[path] = gf
return f, gf.gzsize
}
// startPath attempts to add an entry to the cache for a new cached compressed
// file. If there is already an entry but it isn't done compressing yet, false is
// returned and the caller can still compress and respond but the entry cannot be
// added to the cache. If the entry is being added, the caller must call finishPath
// or abortPath.
func (c *gzcache) startPath(path string, mtime int64) bool {
c.Lock()
defer c.Unlock()
if _, ok := c.paths[path]; ok {
return false
}
// note: no "use" yet, we only set that when we finish, so we don't have to clean up on abort.
c.paths[path] = gzfile{compressing: true, mtime: mtime}
return true
}
// finishPath completes adding an entry to the cache, marking the entry as
// compressed, accounting for its size, and marking its atime.
func (c *gzcache) finishPath(path string, gzsize int64) {
c.Lock()
defer c.Unlock()
c.evictFor(gzsize)
gf := c.paths[path]
gf.compressing = false
gf.gzsize = gzsize
gf.atime = time.Now().UnixNano()
gf.use = &pathUse{path: path}
c.paths[path] = gf
c.size += gzsize
c.push(gf.use)
}
// abortPath marks an entry as no longer being added to the cache.
func (c *gzcache) abortPath(path string) {
c.Lock()
defer c.Unlock()
delete(c.paths, path)
// note: gzfile.use isn't set yet.
}
// push inserts the "pathUse" to the head of the LRU doubly-linked list, unlinking
// it first if needed.
func (c *gzcache) push(u *pathUse) {
c.unlink(u)
u.prev = c.newest
if c.newest != nil {
c.newest.next = u
}
if c.oldest == nil {
c.oldest = u
}
c.newest = u
}
// unlink removes the "pathUse" from the LRU doubly-linked list.
func (c *gzcache) unlink(u *pathUse) {
if c.oldest == u {
c.oldest = u.next
}
if c.newest == u {
c.newest = u.prev
}
if u.prev != nil {
u.prev.next = u.next
}
if u.next != nil {
u.next.prev = u.prev
}
u.prev = nil
u.next = nil
}
// Return path to the on-disk gzipped cached file.
func staticCachePath(dir, path string, mtime int64) string {
p := base64.RawURLEncoding.EncodeToString([]byte(path))
return filepath.Join(dir, fmt.Sprintf("%s+%x.gz", p, mtime))
}
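
A small illustration (hypothetical directory and mtime, not from the diff) of the on-disk name staticCachePath produces:

package main

import (
	"encoding/base64"
	"fmt"
	"path/filepath"
)

func main() {
	dir := "/var/cache/mox/staticgz"      // illustrative cache directory
	var mtime int64 = 1700000000000000000 // illustrative mtime in unix nanoseconds
	p := base64.RawURLEncoding.EncodeToString([]byte("/index.html"))
	fmt.Println(p)                                                     // L2luZGV4Lmh0bWw
	fmt.Println(filepath.Join(dir, fmt.Sprintf("%s+%x.gz", p, mtime))) // .../L2luZGV4Lmh0bWw+<mtime in hex>.gz
}
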
// staticgzcacheReplacer intercepts responses for cacheable static files,
// responding with the cached content if appropriate and failing further writes so
// the regular response writer stops.
type staticgzcacheReplacer struct {
w http.ResponseWriter
r *http.Request // For its context, or logging.
uncomprPath string
uncomprFile *os.File
uncomprMtime time.Time
uncomprSize int64
statusCode int
// Set during WriteHeader to indicate a compressed file has been written, further
// Writes result in an error to stop the writer of the uncompressed content.
handled bool
}
func (w *staticgzcacheReplacer) logger() mlog.Log {
return pkglog.WithContext(w.r.Context())
}
// Header returns the header of the underlying ResponseWriter.
func (w *staticgzcacheReplacer) Header() http.Header {
return w.w.Header()
}
// WriteHeader checks whether the response is eligible for compressing. If not,
// WriteHeader on the underlying ResponseWriter is called. If so, headers for gzip
// content are set and the gzip content is written, either from disk or compressed
// and stored in the cache.
func (w *staticgzcacheReplacer) WriteHeader(statusCode int) {
if w.statusCode != 0 {
return
}
w.statusCode = statusCode
if statusCode != http.StatusOK {
w.w.WriteHeader(statusCode)
return
}
gzf, gzsize := staticgzcache.openPath(w.uncomprPath, w.uncomprMtime.UnixNano())
if gzf == nil {
// Not in cache, or work in progress.
started := staticgzcache.startPath(w.uncomprPath, w.uncomprMtime.UnixNano())
if !started {
// Another request is already compressing and storing this file.
// todo: we should just wait for the other compression to finish, then use its result.
w.w.(*loggingWriter).UncompressedSize = w.uncomprSize
h := w.w.Header()
h.Set("Content-Encoding", "gzip")
h.Del("Content-Length") // We don't know this, we compress streamingly.
gzw, _ := gzip.NewWriterLevel(w.w, gzip.BestSpeed)
_, err := io.Copy(gzw, w.uncomprFile)
if err == nil {
err = gzw.Close()
}
w.handled = true
if err != nil {
w.w.(*loggingWriter).error(err)
}
return
}
// Compress and write to cache.
p := staticCachePath(staticgzcache.dir, w.uncomprPath, w.uncomprMtime.UnixNano())
ngzf, err := os.OpenFile(p, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0600)
if err != nil {
w.logger().Errorx("create new static gzip cache file", err, slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
staticgzcache.abortPath(w.uncomprPath)
return
}
defer func() {
if ngzf != nil {
staticgzcache.abortPath(w.uncomprPath)
err := ngzf.Close()
w.logger().Check(err, "closing failed static gzip cache file", slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
err = os.Remove(p)
w.logger().Check(err, "removing failed static gzip cache file", slog.String("requestpath", w.uncomprPath), slog.String("fspath", p))
}
}()
gzw := gzip.NewWriter(ngzf)
_, err = io.Copy(gzw, w.uncomprFile)
if err == nil {
err = gzw.Close()
}
if err == nil {
err = ngzf.Sync()
}
if err == nil {
gzsize, err = ngzf.Seek(0, 1)
}
if err == nil {
_, err = ngzf.Seek(0, 0)
}
if err != nil {
w.w.(*loggingWriter).error(err)
return
}
staticgzcache.finishPath(w.uncomprPath, gzsize)
gzf = ngzf
ngzf = nil
}
defer func() {
if gzf != nil {
err := gzf.Close()
if err != nil {
w.logger().Errorx("closing static gzip cache file", err)
}
}
}()
// Signal to Write that we already wrote (or attempted to write) the response.
w.handled = true
w.w.(*loggingWriter).UncompressedSize = w.uncomprSize
h := w.w.Header()
h.Set("Content-Encoding", "gzip")
h.Set("Content-Length", fmt.Sprintf("%d", gzsize))
w.w.WriteHeader(statusCode)
if _, err := io.Copy(w.w, gzf); err != nil {
w.w.(*loggingWriter).error(err)
}
}
var errHandledCompressed = errors.New("response written with compression")
func (w *staticgzcacheReplacer) Write(buf []byte) (int, error) {
if w.statusCode == 0 {
w.WriteHeader(http.StatusOK)
}
if w.handled {
// For 200 OK, we already wrote the response and just want the caller to stop processing.
return 0, errHandledCompressed
}
return w.w.Write(buf)
}

View File

@ -1,4 +1,4 @@
package webaccount package http
import ( import (
"archive/tar" "archive/tar"
@ -12,12 +12,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log/slog"
"maps"
"os" "os"
"path" "path"
"runtime/debug" "runtime/debug"
"slices"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -27,7 +24,6 @@ import (
"github.com/mjl-/bstore" "github.com/mjl-/bstore"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/metrics"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/store" "github.com/mjl-/mox/store"
@ -56,23 +52,20 @@ var importers = struct {
Unregister chan *importListener Unregister chan *importListener
Events chan importEvent Events chan importEvent
Abort chan importAbortRequest Abort chan importAbortRequest
Stop chan struct{}
}{ }{
make(chan *importListener, 1), make(chan *importListener, 1),
make(chan *importListener, 1), make(chan *importListener, 1),
make(chan importEvent), make(chan importEvent),
make(chan importAbortRequest), make(chan importAbortRequest),
make(chan struct{}),
} }
// ImportManage should be run as a goroutine, it manages imports of mboxes/maildirs, propagating progress over SSE connections. // manage imports, run in a goroutine before serving.
func ImportManage() { func importManage() {
log := mlog.New("httpimport", nil) log := mlog.New("httpimport")
defer func() { defer func() {
if x := recover(); x != nil { if x := recover(); x != nil {
log.Error("import manage panic", slog.Any("err", x)) log.Error("import manage panic", mlog.Field("err", x))
debug.PrintStack() debug.PrintStack()
metrics.PanicInc(metrics.Importmanage)
} }
}() }()
@ -90,38 +83,38 @@ func ImportManage() {
select { select {
case l := <-importers.Register: case l := <-importers.Register:
// If we have state, send it so the client is up to date. // If we have state, send it so the client is up to date.
s, ok := imports[l.Token] if s, ok := imports[l.Token]; ok {
l.Register <- ok l.Register <- true
if !ok { s.Listeners[l] = struct{}{}
break
}
s.Listeners[l] = struct{}{}
sendEvent := func(kind string, v any) { sendEvent := func(kind string, v any) {
buf, err := json.Marshal(v) buf, err := json.Marshal(v)
if err != nil { if err != nil {
log.Errorx("marshal event", err, slog.String("kind", kind), slog.Any("event", v)) log.Errorx("marshal event", err, mlog.Field("kind", kind), mlog.Field("event", v))
return return
}
ssemsg := fmt.Sprintf("event: %s\ndata: %s\n\n", kind, buf)
select {
case l.Events <- importEvent{kind, []byte(ssemsg), nil, nil}:
default:
log.Debug("dropped initial import event to slow consumer")
}
} }
ssemsg := fmt.Sprintf("event: %s\ndata: %s\n\n", kind, buf)
select { for m, c := range s.MailboxCounts {
case l.Events <- importEvent{kind, []byte(ssemsg), nil, nil}: sendEvent("count", importCount{m, c})
default:
log.Debug("dropped initial import event to slow consumer")
} }
} for _, p := range s.Problems {
sendEvent("problem", importProblem{p})
for m, c := range s.MailboxCounts { }
sendEvent("count", importCount{m, c}) if s.Done != nil {
} sendEvent("done", importDone{})
for _, p := range s.Problems { } else if s.Aborted != nil {
sendEvent("problem", importProblem{p}) sendEvent("aborted", importAborted{})
} }
if s.Done != nil { } else {
sendEvent("done", importDone{}) l.Register <- false
} else if s.Aborted != nil {
sendEvent("aborted", importAborted{})
} }
case l := <-importers.Unregister: case l := <-importers.Unregister:
@ -173,9 +166,6 @@ func ImportManage() {
} }
s.Cancel() s.Cancel()
a.Response <- nil a.Response <- nil
case <-importers.Stop:
return
} }
// Cleanup old state. // Cleanup old state.
@ -199,27 +189,25 @@ type importProblem struct {
} }
type importDone struct{} type importDone struct{}
type importAborted struct{} type importAborted struct{}
type importStep struct {
Title string
}
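
On the wire, each of these event types is serialized by sendEvent as a standard SSE frame: an "event:" line with the kind, then a "data:" line with the JSON-encoded struct. Illustrative frames (the problem message text is made up):

event: step
data: {"Title":"matching messages with threads"}

event: problem
data: {"Message":"parsing message example.mbox: unexpected end of file (continuing)"}

event: done
data: {}
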
// importStart prepare the import and launches the goroutine to actually import. // importStart prepare the import and launches the goroutine to actually import.
// importStart is responsible for closing f and removing f. // importStart is responsible for closing f.
func importStart(log mlog.Log, accName string, f *os.File, skipMailboxPrefix string) (string, bool, error) { func importStart(log *mlog.Log, accName string, f *os.File, skipMailboxPrefix string) (string, error) {
defer func() { defer func() {
if f != nil { if f != nil {
store.CloseRemoveTempFile(log, f, "upload for import") err := f.Close()
log.Check(err, "closing uploaded file")
} }
}() }()
buf := make([]byte, 16) buf := make([]byte, 16)
if _, err := cryptrand.Read(buf); err != nil { if _, err := cryptrand.Read(buf); err != nil {
return "", false, err return "", err
} }
token := fmt.Sprintf("%x", buf) token := fmt.Sprintf("%x", buf)
if _, err := f.Seek(0, 0); err != nil { if _, err := f.Seek(0, 0); err != nil {
return "", false, fmt.Errorf("seek to start of file: %v", err) return "", fmt.Errorf("seek to start of file: %v", err)
} }
// Recognize file format. // Recognize file format.
@ -228,12 +216,12 @@ func importStart(log mlog.Log, accName string, f *os.File, skipMailboxPrefix str
magicGzip := []byte{0x1f, 0x8b} magicGzip := []byte{0x1f, 0x8b}
magic := make([]byte, 4) magic := make([]byte, 4)
if _, err := f.ReadAt(magic, 0); err != nil { if _, err := f.ReadAt(magic, 0); err != nil {
return "", true, fmt.Errorf("detecting file format: %v", err) return "", fmt.Errorf("detecting file format: %v", err)
} }
if bytes.Equal(magic, magicZip) { if bytes.Equal(magic, magicZip) {
iszip = true iszip = true
} else if !bytes.Equal(magic[:2], magicGzip) { } else if !bytes.Equal(magic[:2], magicGzip) {
return "", true, fmt.Errorf("file is not a zip or gzip file") return "", fmt.Errorf("file is not a zip or gzip file")
} }
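
A standalone sketch (hypothetical helper, same logic as importStart above) of the zip/gzip detection: zip archives start with the bytes "PK\x03\x04", gzip streams with 0x1f 0x8b.

package main

import (
	"bytes"
	"fmt"
	"os"
)

// detectFormat mirrors the check in importStart: look at the first bytes of
// the uploaded file to decide between a zip archive and a gzipped tar.
func detectFormat(f *os.File) (string, error) {
	magic := make([]byte, 4)
	if _, err := f.ReadAt(magic, 0); err != nil {
		return "", fmt.Errorf("detecting file format: %v", err)
	}
	if bytes.Equal(magic, []byte{'P', 'K', 0x03, 0x04}) {
		return "zip", nil
	}
	if bytes.Equal(magic[:2], []byte{0x1f, 0x8b}) {
		return "gzip", nil
	}
	return "", fmt.Errorf("file is not a zip or gzip file")
}

func main() {
	f, err := os.Open("export.tgz") // illustrative filename
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	fmt.Println(detectFormat(f))
}
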
var zr *zip.Reader var zr *zip.Reader
@ -241,23 +229,23 @@ func importStart(log mlog.Log, accName string, f *os.File, skipMailboxPrefix str
if iszip { if iszip {
fi, err := f.Stat() fi, err := f.Stat()
if err != nil { if err != nil {
return "", false, fmt.Errorf("stat temporary import zip file: %v", err) return "", fmt.Errorf("stat temporary import zip file: %v", err)
} }
zr, err = zip.NewReader(f, fi.Size()) zr, err = zip.NewReader(f, fi.Size())
if err != nil { if err != nil {
return "", true, fmt.Errorf("opening zip file: %v", err) return "", fmt.Errorf("opening zip file: %v", err)
} }
} else { } else {
gzr, err := gzip.NewReader(f) gzr, err := gzip.NewReader(f)
if err != nil { if err != nil {
return "", true, fmt.Errorf("gunzip: %v", err) return "", fmt.Errorf("gunzip: %v", err)
} }
tr = tar.NewReader(gzr) tr = tar.NewReader(gzr)
} }
acc, err := store.OpenAccount(log, accName, false) acc, err := store.OpenAccount(accName)
if err != nil { if err != nil {
return "", false, fmt.Errorf("open acount: %v", err) return "", fmt.Errorf("open acount: %v", err)
} }
acc.Lock() // Not using WithWLock because importMessage is responsible for unlocking. acc.Lock() // Not using WithWLock because importMessage is responsible for unlocking.
@ -266,7 +254,7 @@ func importStart(log mlog.Log, accName string, f *os.File, skipMailboxPrefix str
acc.Unlock() acc.Unlock()
xerr := acc.Close() xerr := acc.Close()
log.Check(xerr, "closing account") log.Check(xerr, "closing account")
return "", false, fmt.Errorf("start transaction: %v", err) return "", fmt.Errorf("start transaction: %v", err)
} }
// Ensure token is registered before returning, with context that can be canceled. // Ensure token is registered before returning, with context that can be canceled.
@ -275,14 +263,14 @@ func importStart(log mlog.Log, accName string, f *os.File, skipMailboxPrefix str
log.Info("starting import") log.Info("starting import")
go importMessages(ctx, log.WithCid(mox.Cid()), token, acc, tx, zr, tr, f, skipMailboxPrefix) go importMessages(ctx, log.WithCid(mox.Cid()), token, acc, tx, zr, tr, f, skipMailboxPrefix)
f = nil // importMessages is now responsible for closing and removing. f = nil // importMessages is now responsible for closing.
return token, false, nil return token, nil
} }
// importMessages imports the messages from zip/tgz file f. // importMessages imports the messages from zip/tgz file f.
// importMessages is responsible for unlocking and closing acc, and closing tx and f. // importMessages is responsible for unlocking and closing acc, and closing tx and f.
func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.Account, tx *bstore.Tx, zr *zip.Reader, tr *tar.Reader, f *os.File, skipMailboxPrefix string) { func importMessages(ctx context.Context, log *mlog.Log, token string, acc *store.Account, tx *bstore.Tx, zr *zip.Reader, tr *tar.Reader, f *os.File, skipMailboxPrefix string) {
// If a fatal processing error occurs, we panic with this type. // If a fatal processing error occurs, we panic with this type.
type importError struct{ Err error } type importError struct{ Err error }
@ -290,18 +278,29 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
var changes []store.Change var changes []store.Change
// ID's of delivered messages. If we have to rollback, we have to remove this files. // ID's of delivered messages. If we have to rollback, we have to remove this files.
var newIDs []int64 var deliveredIDs []int64
ximportcheckf := func(err error, format string, args ...any) {
if err != nil {
panic(importError{fmt.Errorf("%s: %s", fmt.Sprintf(format, args...), err)})
}
}
sendEvent := func(kind string, v any) { sendEvent := func(kind string, v any) {
buf, err := json.Marshal(v) buf, err := json.Marshal(v)
if err != nil { if err != nil {
log.Errorx("marshal event", err, slog.String("kind", kind), slog.Any("event", v)) log.Errorx("marshal event", err, mlog.Field("kind", kind), mlog.Field("event", v))
return return
} }
ssemsg := fmt.Sprintf("event: %s\ndata: %s\n\n", kind, buf) ssemsg := fmt.Sprintf("event: %s\ndata: %s\n\n", kind, buf)
importers.Events <- importEvent{token, []byte(ssemsg), v, nil} importers.Events <- importEvent{token, []byte(ssemsg), v, nil}
} }
problemf := func(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
sendEvent("problem", importProblem{Message: msg})
}
canceled := func() bool { canceled := func() bool {
select { select {
case <-ctx.Done(): case <-ctx.Done():
@ -312,18 +311,14 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
} }
} }
problemf := func(format string, args ...any) {
msg := fmt.Sprintf(format, args...)
sendEvent("problem", importProblem{Message: msg})
}
defer func() { defer func() {
store.CloseRemoveTempFile(log, f, "uploaded messages") err := f.Close()
log.Check(err, "closing uploaded messages file")
for _, id := range newIDs { for _, id := range deliveredIDs {
p := acc.MessagePath(id) p := acc.MessagePath(id)
err := os.Remove(p) err := os.Remove(p)
log.Check(err, "closing message file after import error", slog.String("path", p)) log.Check(err, "closing message file after import error", mlog.Field("path", p))
} }
if tx != nil { if tx != nil {
err := tx.Rollback() err := tx.Rollback()
@ -344,21 +339,11 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
problemf("%s (aborting)", err.Err) problemf("%s (aborting)", err.Err)
sendEvent("aborted", importAborted{}) sendEvent("aborted", importAborted{})
} else { } else {
log.Error("import panic", slog.Any("err", x)) log.Error("import panic", mlog.Field("err", x))
debug.PrintStack() debug.PrintStack()
metrics.PanicInc(metrics.Importmessages)
} }
}() }()
ximportcheckf := func(err error, format string, args ...any) {
if err != nil {
panic(importError{fmt.Errorf("%s: %s", fmt.Sprintf(format, args...), err)})
}
}
err := acc.ThreadingWait(log)
ximportcheckf(err, "waiting for account thread upgrade")
conf, _ := acc.Conf() conf, _ := acc.Conf()
jf, _, err := acc.OpenJunkFilter(ctx, log) jf, _, err := acc.OpenJunkFilter(ctx, log)
@ -373,21 +358,10 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
}() }()
// Mailboxes we imported, and message counts. // Mailboxes we imported, and message counts.
mailboxNames := map[string]*store.Mailbox{} mailboxes := map[string]store.Mailbox{}
mailboxIDs := map[int64]*store.Mailbox{}
mailboxKeywordCounts := map[int64]int{}
messages := map[string]int{} messages := map[string]int{}
maxSize := acc.QuotaMessageSize() // For maildirs, we are likely to get a possible dovecot-keywords file after having imported the messages. Once we see the keywords, we use them. But before that time we remember which messages miss a keywords. Once the keywords become available, we'll fix up the flags for the unknown messages
du := store.DiskUsage{ID: 1}
err = tx.Get(&du)
ximportcheckf(err, "get disk usage")
var addSize int64
// For maildirs, we are likely to get a possible dovecot-keywords file after having
// imported the messages. Once we see the keywords, we use them. But before that
// time we remember which messages miss keywords. Once the keywords become
// available, we'll fix up the flags for the unknown messages
mailboxKeywords := map[string]map[rune]string{} // Mailbox to 'a'-'z' to flag name. mailboxKeywords := map[string]map[rune]string{} // Mailbox to 'a'-'z' to flag name.
mailboxMissingKeywordMessages := map[string]map[int64]string{} // Mailbox to message id to string consisting of the unrecognized flags. mailboxMissingKeywordMessages := map[string]map[int64]string{} // Mailbox to message id to string consisting of the unrecognized flags.
@ -396,8 +370,6 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
// finally at the end as a closing statement. // finally at the end as a closing statement.
var prevMailbox string var prevMailbox string
var modseq store.ModSeq // Assigned on first message, used for all messages.
trainMessage := func(m *store.Message, p message.Part, pos string) { trainMessage := func(m *store.Message, p message.Part, pos string) {
words, err := jf.ParseMessage(p) words, err := jf.ParseMessage(p)
if err != nil { if err != nil {
@ -431,104 +403,82 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
trainMessage(m, p, fmt.Sprintf("message id %d", m.ID)) trainMessage(m, p, fmt.Sprintf("message id %d", m.ID))
} }
xensureMailbox := func(name string) *store.Mailbox { xensureMailbox := func(name string) store.Mailbox {
// Ensure name is normalized.
name = norm.NFC.String(name) name = norm.NFC.String(name)
name, _, err := store.CheckMailboxName(name, true) if strings.ToLower(name) == "inbox" {
ximportcheckf(err, "checking mailbox name") name = "Inbox"
}
if mb, ok := mailboxNames[name]; ok { if mb, ok := mailboxes[name]; ok {
return mb return mb
} }
var p string var p string
var mb *store.Mailbox var mb store.Mailbox
var parent store.Mailbox
for i, e := range strings.Split(name, "/") { for i, e := range strings.Split(name, "/") {
if i == 0 { if i == 0 {
p = e p = e
} else { } else {
p = path.Join(p, e) p = path.Join(p, e)
} }
if _, ok := mailboxNames[p]; ok { if _, ok := mailboxes[p]; ok {
continue continue
} }
mb, err = acc.MailboxFind(tx, p) q := bstore.QueryTx[store.Mailbox](tx)
ximportcheckf(err, "looking up mailbox %s to import to (aborting)", p) q.FilterNonzero(store.Mailbox{Name: p})
if mb == nil { var err error
mb, err = q.Get()
if err == bstore.ErrAbsent {
uidvalidity, err := acc.NextUIDValidity(tx) uidvalidity, err := acc.NextUIDValidity(tx)
ximportcheckf(err, "finding next uid validity") ximportcheckf(err, "finding next uid validity")
mb = store.Mailbox{
if modseq == 0 {
var err error
modseq, err = acc.NextModSeq(tx)
ximportcheckf(err, "assigning next modseq")
}
mb = &store.Mailbox{
CreateSeq: modseq,
ModSeq: modseq,
ParentID: parent.ID,
Name: p, Name: p,
UIDValidity: uidvalidity, UIDValidity: uidvalidity,
UIDNext: 1, UIDNext: 1,
HaveCounts: true,
// Do not assign special-use flags. This existing account probably already has such mailboxes. // Do not assign special-use flags. This existing account probably already has such mailboxes.
} }
err = tx.Insert(mb) err = tx.Insert(&mb)
ximportcheckf(err, "inserting mailbox in database") ximportcheckf(err, "inserting mailbox in database")
parent = *mb
if tx.Get(&store.Subscription{Name: p}) != nil { err = tx.Insert(&store.Subscription{Name: p})
err := tx.Insert(&store.Subscription{Name: p}) if err != nil && !errors.Is(err, bstore.ErrUnique) {
ximportcheckf(err, "subscribing to imported mailbox") ximportcheckf(err, "subscribing to imported mailbox")
} }
changes = append(changes, store.ChangeAddMailbox{Mailbox: *mb, Flags: []string{`\Subscribed`}}) changes = append(changes, store.ChangeAddMailbox{Name: p, Flags: []string{`\Subscribed`}})
} else if err != nil {
ximportcheckf(err, "creating mailbox %s (aborting)", p)
} }
if prevMailbox != "" && mb.Name != prevMailbox { if prevMailbox != "" && mb.Name != prevMailbox {
sendEvent("count", importCount{prevMailbox, messages[prevMailbox]}) sendEvent("count", importCount{prevMailbox, messages[prevMailbox]})
} }
mailboxKeywordCounts[mb.ID] = len(mb.Keywords) mailboxes[mb.Name] = mb
mailboxNames[mb.Name] = mb
mailboxIDs[mb.ID] = mb
sendEvent("count", importCount{mb.Name, 0}) sendEvent("count", importCount{mb.Name, 0})
prevMailbox = mb.Name prevMailbox = mb.Name
} }
return mb return mb
} }
xdeliver := func(mb *store.Mailbox, m *store.Message, f *os.File, pos string) { xdeliver := func(mb store.Mailbox, m *store.Message, f *os.File, pos string) {
defer store.CloseRemoveTempFile(log, f, "message file for import") defer func() {
if f != nil {
err := os.Remove(f.Name())
log.Check(err, "removing temporary message file for delivery")
err = f.Close()
log.Check(err, "closing temporary message file for delivery")
}
}()
m.MailboxID = mb.ID m.MailboxID = mb.ID
m.MailboxOrigID = mb.ID m.MailboxOrigID = mb.ID
addSize += m.Size
if maxSize > 0 && du.MessageSize+addSize > maxSize {
ximportcheckf(fmt.Errorf("account over maximum total size %d", maxSize), "checking quota")
}
if modseq == 0 {
var err error
modseq, err = acc.NextModSeq(tx)
ximportcheckf(err, "assigning next modseq")
}
m.CreateSeq = modseq
m.ModSeq = modseq
// Parse message and store parsed information for later fast retrieval. // Parse message and store parsed information for later fast retrieval.
p, err := message.EnsurePart(log.Logger, false, f, m.Size) p, err := message.EnsurePart(f, m.Size)
if err != nil { if err != nil {
problemf("parsing message %s: %s (continuing)", pos, err) problemf("parsing message %s: %s (continuing)", pos, err)
} }
m.ParsedBuf, err = json.Marshal(p) m.ParsedBuf, err = json.Marshal(p)
ximportcheckf(err, "marshal parsed message structure") ximportcheckf(err, "marshal parsed message structure")
// Set fields needed for future threading. By doing it now, MessageAdd won't
// have to parse the Part again.
p.SetReaderAt(store.FileMsgReader(m.MsgPrefix, f))
m.PrepareThreading(log, &p)
if m.Received.IsZero() { if m.Received.IsZero() {
if p.Envelope != nil && !p.Envelope.Date.IsZero() { if p.Envelope != nil && !p.Envelope.Date.IsZero() {
m.Received = p.Envelope.Date m.Received = p.Envelope.Date
@ -540,30 +490,26 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
// We set the flags that Deliver would set now and train ourselves. This prevents // We set the flags that Deliver would set now and train ourselves. This prevents
// Deliver from training, which would open the junk filter, change it, and write it // Deliver from training, which would open the junk filter, change it, and write it
// back to disk, for each message (slow). // back to disk, for each message (slow).
m.JunkFlagsForMailbox(*mb, conf) m.JunkFlagsForMailbox(mb.Name, conf)
if jf != nil && m.NeedsTraining() { if jf != nil && m.NeedsTraining() {
trainMessage(m, p, pos) trainMessage(m, p, pos)
} }
opts := store.AddOpts{ const consumeFile = true
SkipDirSync: true, const sync = false
SkipTraining: true, const notrain = true
SkipThreads: true, if err := acc.DeliverMessage(log, tx, m, f, consumeFile, mb.Sent, sync, notrain); err != nil {
SkipUpdateDiskUsage: true,
SkipCheckQuota: true,
SkipPreview: true,
}
if err := acc.MessageAdd(log, tx, mb, m, f, opts); err != nil {
problemf("delivering message %s: %s (continuing)", pos, err) problemf("delivering message %s: %s (continuing)", pos, err)
return return
} }
newIDs = append(newIDs, m.ID) deliveredIDs = append(deliveredIDs, m.ID)
changes = append(changes, m.ChangeAddUID(*mb)) changes = append(changes, store.ChangeAddUID{MailboxID: m.MailboxID, UID: m.UID, Flags: m.Flags})
messages[mb.Name]++ messages[mb.Name]++
if messages[mb.Name]%100 == 0 || prevMailbox != mb.Name { if messages[mb.Name]%100 == 0 || prevMailbox != mb.Name {
prevMailbox = mb.Name prevMailbox = mb.Name
sendEvent("count", importCount{mb.Name, messages[mb.Name]}) sendEvent("count", importCount{mb.Name, messages[mb.Name]})
} }
f = nil
} }
ximportMbox := func(mailbox, filename string, r io.Reader) { ximportMbox := func(mailbox, filename string, r io.Reader) {
@ -573,7 +519,7 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
} }
mb := xensureMailbox(mailbox) mb := xensureMailbox(mailbox)
mr := store.NewMboxReader(log, store.CreateMessageTemp, filename, r) mr := store.NewMboxReader(store.CreateMessageTemp, filename, r, log)
for { for {
m, mf, pos, err := mr.Next() m, mf, pos, err := mr.Next()
if err == io.EOF { if err == io.EOF {
@ -593,11 +539,14 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
} }
mb := xensureMailbox(mailbox) mb := xensureMailbox(mailbox)
f, err := store.CreateMessageTemp(log, "import") f, err := store.CreateMessageTemp("import")
ximportcheckf(err, "creating temp message") ximportcheckf(err, "creating temp message")
defer func() { defer func() {
if f != nil { if f != nil {
store.CloseRemoveTempFile(log, f, "message to import") err := os.Remove(f.Name())
log.Check(err, "removing temporary file for delivery")
err = f.Close()
log.Check(err, "closing temporary file for delivery")
} }
}() }()
@ -634,8 +583,7 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
// Parse flags. See https://cr.yp.to/proto/maildir.html. // Parse flags. See https://cr.yp.to/proto/maildir.html.
var keepFlags string var keepFlags string
var flags store.Flags flags := store.Flags{}
keywords := map[string]bool{}
t = strings.SplitN(path.Base(filename), ":2,", 2) t = strings.SplitN(path.Base(filename), ":2,", 2)
if len(t) == 2 { if len(t) == 2 {
for _, c := range t[1] { for _, c := range t[1] {
@ -654,12 +602,12 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
flags.Flagged = true flags.Flagged = true
default: default:
if c >= 'a' && c <= 'z' { if c >= 'a' && c <= 'z' {
dovecotKeywords, ok := mailboxKeywords[mailbox] keywords, ok := mailboxKeywords[mailbox]
if !ok { if !ok {
// No keywords file seen yet, we'll try later if it comes in. // No keywords file seen yet, we'll try later if it comes in.
keepFlags += string(c) keepFlags += string(c)
} else if kw, ok := dovecotKeywords[c]; ok { } else if kw, ok := keywords[c]; ok {
flagSet(&flags, keywords, kw) flagSet(&flags, strings.ToLower(kw))
} }
} }
} }
@ -669,7 +617,6 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
m := store.Message{ m := store.Message{
Received: received, Received: received,
Flags: flags, Flags: flags,
Keywords: slices.Sorted(maps.Keys(keywords)),
Size: size, Size: size,
} }
xdeliver(mb, &m, f, filename) xdeliver(mb, &m, f, filename)
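
As a concrete (illustrative) example of the flag parsing above: a maildir file named 1694440000.M1P2.host:2,FSa carries the standard maildir flags F (Flagged) and S (Seen), per the maildir spec referenced above, plus the Dovecot keyword letter a; that letter is resolved through the mailbox's dovecot-keywords file if it has already been imported, and otherwise recorded in mailboxMissingKeywordMessages so the flags can be fixed up once the keywords file is seen.
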
@ -713,63 +660,47 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
case "new", "cur", "tmp": case "new", "cur", "tmp":
mailbox := path.Dir(dir) mailbox := path.Dir(dir)
ximportMaildir(mailbox, origName, r) ximportMaildir(mailbox, origName, r)
return default:
} if path.Base(name) == "dovecot-keywords" {
mailbox := path.Dir(name)
if path.Base(name) != "dovecot-keywords" { keywords := map[rune]string{}
problemf("unrecognized file %s (skipping)", origName) words, err := store.ParseDovecotKeywords(r, log)
return log.Check(err, "parsing dovecot keywords for mailbox", mlog.Field("mailbox", mailbox))
} for i, kw := range words {
keywords['a'+rune(i)] = kw
// Handle dovecot-keywords.
mailbox := path.Dir(name)
dovecotKeywords := map[rune]string{}
words, err := store.ParseDovecotKeywordsFlags(r, log)
log.Check(err, "parsing dovecot keywords for mailbox", slog.String("mailbox", mailbox))
for i, kw := range words {
dovecotKeywords['a'+rune(i)] = kw
}
mailboxKeywords[mailbox] = dovecotKeywords
for id, chars := range mailboxMissingKeywordMessages[mailbox] {
var flags, zeroflags store.Flags
keywords := map[string]bool{}
for _, c := range chars {
kw, ok := dovecotKeywords[c]
if !ok {
problemf("unspecified dovecot message flag %c for message id %d (continuing)", c, id)
continue
} }
flagSet(&flags, keywords, kw) mailboxKeywords[mailbox] = keywords
for id, chars := range mailboxMissingKeywordMessages[mailbox] {
var flags, zeroflags store.Flags
for _, c := range chars {
kw, ok := keywords[c]
if !ok {
problemf("unspecified message flag %c for message id %d (continuing)", c, id)
continue
}
flagSet(&flags, strings.ToLower(kw))
}
if flags == zeroflags {
continue
}
m := store.Message{ID: id}
err := tx.Get(&m)
ximportcheckf(err, "get imported message for flag update")
m.Flags = m.Flags.Set(flags, flags)
// We train before updating, training may set m.TrainedJunk.
if jf != nil && m.NeedsTraining() {
openTrainMessage(&m)
}
err = tx.Update(&m)
ximportcheckf(err, "updating message after flag update")
changes = append(changes, store.ChangeFlags{MailboxID: m.MailboxID, UID: m.UID, Mask: flags, Flags: flags})
}
delete(mailboxMissingKeywordMessages, mailbox)
} else {
problemf("unrecognized file %s (skipping)", origName)
} }
if flags == zeroflags && len(keywords) == 0 {
continue
}
m := store.Message{ID: id}
err := tx.Get(&m)
ximportcheckf(err, "get imported message for flag update")
mb := mailboxIDs[m.MailboxID]
mb.Sub(m.MailboxCounts())
oflags := m.Flags
m.Flags = m.Flags.Set(flags, flags)
m.Keywords = slices.Sorted(maps.Keys(keywords))
mb.Add(m.MailboxCounts())
mb.Keywords, _ = store.MergeKeywords(mb.Keywords, m.Keywords)
// We train before updating, training may set m.TrainedJunk.
if jf != nil && m.NeedsTraining() {
openTrainMessage(&m)
}
err = tx.Update(&m)
ximportcheckf(err, "updating message after flag update")
changes = append(changes, m.ChangeFlags(oflags, *mb))
} }
delete(mailboxMissingKeywordMessages, mailbox)
} }
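
The dovecot-keywords handling above builds a rune-to-keyword table: the file lists roughly one keyword per line as `<index> <name>`, and index i corresponds to the letter 'a'+i used in the maildir filename flags. A small self-contained sketch of that mapping (the file content shown is a made-up example, not from the diff):

```go
package main

import "fmt"

func main() {
	// Words as they would come out of parsing a dovecot-keywords file like:
	//   0 $Forwarded
	//   1 Junk
	//   2 NonJunk
	words := []string{"$Forwarded", "Junk", "NonJunk"}
	keywords := map[rune]string{}
	for i, kw := range words {
		keywords['a'+rune(i)] = kw
	}
	fmt.Println(keywords['b']) // Junk
}
```
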
if zr != nil { if zr != nil {
@ -806,38 +737,17 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
for _, count := range messages { for _, count := range messages {
total += count total += count
} }
log.Debug("messages imported", slog.Int("total", total)) log.Debug("message imported", mlog.Field("total", total))
// Send final update for count of last-imported mailbox. // Send final update for count of last-imported mailbox.
if prevMailbox != "" { if prevMailbox != "" {
sendEvent("count", importCount{prevMailbox, messages[prevMailbox]}) sendEvent("count", importCount{prevMailbox, messages[prevMailbox]})
} }
// Match threads.
if len(newIDs) > 0 {
sendEvent("step", importStep{"matching messages with threads"})
err = acc.AssignThreads(ctx, log, tx, newIDs[0], 0, io.Discard)
ximportcheckf(err, "assigning messages to threads")
}
// Update mailboxes with counts and keywords.
for _, mb := range mailboxIDs {
err = tx.Update(mb)
ximportcheckf(err, "updating mailbox count and keywords")
changes = append(changes, mb.ChangeCounts())
if len(mb.Keywords) != mailboxKeywordCounts[mb.ID] {
changes = append(changes, mb.ChangeKeywords())
}
}
err = acc.AddMessageSize(log, tx, addSize)
ximportcheckf(err, "updating disk usage after import")
err = tx.Commit() err = tx.Commit()
tx = nil tx = nil
ximportcheckf(err, "commit") ximportcheckf(err, "commit")
newIDs = nil deliveredIDs = nil
if jf != nil { if jf != nil {
if err := jf.Close(); err != nil { if err := jf.Close(); err != nil {
@ -847,7 +757,9 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
jf = nil jf = nil
} }
store.BroadcastChanges(acc, changes) comm := store.RegisterComm(acc)
defer comm.Unregister()
comm.Broadcast(changes)
acc.Unlock() acc.Unlock()
err = acc.Close() err = acc.Close()
log.Check(err, "closing account after import") log.Check(err, "closing account after import")
@ -856,7 +768,9 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
sendEvent("done", importDone{}) sendEvent("done", importDone{})
} }
func flagSet(flags *store.Flags, keywords map[string]bool, word string) { func flagSet(flags *store.Flags, word string) {
// todo: custom labels, e.g. $label1, JunkRecorded?
switch word { switch word {
case "forwarded", "$forwarded": case "forwarded", "$forwarded":
flags.Forwarded = true flags.Forwarded = true
@ -868,9 +782,5 @@ func flagSet(flags *store.Flags, keywords map[string]bool, word string) {
flags.Phishing = true flags.Phishing = true
case "mdnsent", "$mdnsent": case "mdnsent", "$mdnsent":
flags.MDNSent = true flags.MDNSent = true
default:
if err := store.CheckKeyword(word); err == nil {
keywords[word] = true
}
} }
} }

View File

@ -1,17 +0,0 @@
package http
import (
"fmt"
"os"
"testing"
"github.com/mjl-/mox/metrics"
)
func TestMain(m *testing.M) {
m.Run()
if metrics.Panics.Load() > 0 {
fmt.Println("unhandled panics encountered")
os.Exit(2)
}
}

View File

@ -1,205 +0,0 @@
package http
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/xml"
"fmt"
"maps"
"slices"
"strings"
"github.com/mjl-/mox/admin"
"github.com/mjl-/mox/smtp"
)
// Apple software isn't good at autoconfig/autodiscovery, but it can import a
// device management profile containing account settings.
//
// See https://developer.apple.com/documentation/devicemanagement/mail.
type deviceManagementProfile struct {
XMLName xml.Name `xml:"plist"`
Version string `xml:"version,attr"`
Dict dict `xml:"dict"`
}
type array []dict
type dict map[string]any
// MarshalXML marshals as <dict> with multiple pairs of <key> and a value of various types.
func (m dict) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
// The plist format isn't that easy to generate with Go's xml package, it's leaving
// out reasonable structure, instead just concatenating key/value pairs. Perhaps
// there is a better way?
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "dict"}}); err != nil {
return err
}
l := slices.Sorted(maps.Keys(m))
for _, k := range l {
tokens := []xml.Token{
xml.StartElement{Name: xml.Name{Local: "key"}},
xml.CharData([]byte(k)),
xml.EndElement{Name: xml.Name{Local: "key"}},
}
for _, t := range tokens {
if err := e.EncodeToken(t); err != nil {
return err
}
}
tokens = nil
switch v := m[k].(type) {
case string:
tokens = []xml.Token{
xml.StartElement{Name: xml.Name{Local: "string"}},
xml.CharData([]byte(v)),
xml.EndElement{Name: xml.Name{Local: "string"}},
}
case int:
tokens = []xml.Token{
xml.StartElement{Name: xml.Name{Local: "integer"}},
xml.CharData(fmt.Appendf(nil, "%d", v)),
xml.EndElement{Name: xml.Name{Local: "integer"}},
}
case bool:
tag := "false"
if v {
tag = "true"
}
tokens = []xml.Token{
xml.StartElement{Name: xml.Name{Local: tag}},
xml.EndElement{Name: xml.Name{Local: tag}},
}
case array:
if err := e.EncodeToken(xml.StartElement{Name: xml.Name{Local: "array"}}); err != nil {
return err
}
for _, d := range v {
if err := d.MarshalXML(e, xml.StartElement{Name: xml.Name{Local: "array"}}); err != nil {
return err
}
}
if err := e.EncodeToken(xml.EndElement{Name: xml.Name{Local: "array"}}); err != nil {
return err
}
default:
return fmt.Errorf("unexpected dict value of type %T", v)
}
for _, t := range tokens {
if err := e.EncodeToken(t); err != nil {
return err
}
}
}
if err := e.EncodeToken(xml.EndElement{Name: xml.Name{Local: "dict"}}); err != nil {
return err
}
return nil
}
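
To make the key/value concatenation concrete, this is roughly what a small dict is expected to encode to with the MarshalXML above (hand-written illustration only; exact whitespace depends on the encoder's indent settings):

```go
// Illustrative only: a value like
//   dict{"PayloadType": "Configuration", "PayloadVersion": 1, "IsManaged": true}
// should encode, with keys sorted, roughly as:
const exampleDict = `<dict>
	<key>IsManaged</key>
	<true></true>
	<key>PayloadType</key>
	<string>Configuration</string>
	<key>PayloadVersion</key>
	<integer>1</integer>
</dict>`
```
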
// MobileConfig returns a device profile for a macOS Mail email account. The file
// should have a .mobileconfig extension. Opening the file adds it to Profiles in
// System Preferences, where it can be installed. This profile does not contain a
// password because sending opaque files containing passwords around to users seems
// like bad security practice.
//
// Multiple addresses can be passed, the first is used for IMAP/submission login,
// and likely seen as primary account by Apple software.
//
// The config is not signed, so users must ignore warnings about unsigned profiles.
func MobileConfig(addresses []string, fullName string) ([]byte, error) {
if len(addresses) == 0 {
return nil, fmt.Errorf("need at least 1 address")
}
addr, err := smtp.ParseAddress(addresses[0])
if err != nil {
return nil, fmt.Errorf("parsing address: %v", err)
}
config, err := admin.ClientConfigDomain(addr.Domain)
if err != nil {
return nil, fmt.Errorf("getting config for domain: %v", err)
}
// Apple software wants identifiers...
t := strings.Split(addr.Domain.Name(), ".")
slices.Reverse(t)
reverseAddr := strings.Join(t, ".") + "." + addr.Localpart.String()
// Apple software wants UUIDs... We generate them deterministically based on address
// and our code (through key, which we must change if code changes).
const key = "mox0"
uuid := func(prefix string) string {
mac := hmac.New(sha256.New, []byte(key))
mac.Write([]byte(prefix + "\n" + "\n" + strings.Join(addresses, ",")))
sum := mac.Sum(nil)
uuid := fmt.Sprintf("%x-%x-%x-%x-%x", sum[0:4], sum[4:6], sum[6:8], sum[8:10], sum[10:16])
return uuid
}
uuidConfig := uuid("config")
uuidAccount := uuid("account")
// The "UseSSL" fields are underspecified in Apple's format. They say "If true,
// enables SSL for authentication on the incoming mail server.". I'm assuming they
// want to know if they should start immediately with a handshake, instead of
// starting out plain. There is no way to require STARTTLS though. You could even
// interpret their wording as this field enable authentication through client-side
// TLS certificates, given their "on the incoming mail server", instead of "of the
// incoming mail server".
var w bytes.Buffer
p := deviceManagementProfile{
Version: "1.0",
Dict: dict(map[string]any{
"PayloadDisplayName": fmt.Sprintf("%s email account", addresses[0]),
"PayloadIdentifier": reverseAddr + ".email",
"PayloadType": "Configuration",
"PayloadUUID": uuidConfig,
"PayloadVersion": 1,
"PayloadContent": array{
dict(map[string]any{
"EmailAccountDescription": addresses[0],
"EmailAccountName": fullName,
"EmailAccountType": "EmailTypeIMAP",
// Comma-separated multiple addresses are not documented at Apple, but seem to
// work.
"EmailAddress": strings.Join(addresses, ","),
"IncomingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing..
"IncomingMailServerUsername": addresses[0],
"IncomingMailServerHostName": config.IMAP.Host.ASCII,
"IncomingMailServerPortNumber": config.IMAP.Port,
"IncomingMailServerUseSSL": config.IMAP.TLSMode == admin.TLSModeImmediate,
"OutgoingMailServerAuthentication": "EmailAuthCRAMMD5", // SCRAM not an option at time of writing...
"OutgoingMailServerHostName": config.Submission.Host.ASCII,
"OutgoingMailServerPortNumber": config.Submission.Port,
"OutgoingMailServerUsername": addresses[0],
"OutgoingMailServerUseSSL": config.Submission.TLSMode == admin.TLSModeImmediate,
"OutgoingPasswordSameAsIncomingPassword": true,
"PayloadIdentifier": reverseAddr + ".email.account",
"PayloadType": "com.apple.mail.managed",
"PayloadUUID": uuidAccount,
"PayloadVersion": 1,
}),
},
}),
}
if _, err := fmt.Fprint(&w, xml.Header); err != nil {
return nil, err
}
if _, err := fmt.Fprint(&w, "<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"); err != nil {
return nil, err
}
enc := xml.NewEncoder(&w)
enc.Indent("", "\t")
if err := enc.Encode(p); err != nil {
return nil, err
}
if _, err := fmt.Fprintln(&w); err != nil {
return nil, err
}
return w.Bytes(), nil
}
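
A hypothetical caller, not present in the diff: this assumes mox's static and dynamic configuration have already been loaded (MobileConfig resolves the client settings for the address's domain), and assumes the package import path github.com/mjl-/mox/http.

```go
package main

import (
	"log"
	"os"

	moxhttp "github.com/mjl-/mox/http"
)

func main() {
	// Write an unsigned device profile for one account; the user still
	// enters the password when installing the profile.
	profile, err := moxhttp.MobileConfig([]string{"user@mox.example"}, "Mox User")
	if err != nil {
		log.Fatalf("generating profile: %v", err)
	}
	if err := os.WriteFile("user.mobileconfig", profile, 0o600); err != nil {
		log.Fatalf("writing profile: %v", err)
	}
}
```
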

View File

@ -1,7 +1,6 @@
package http package http
import ( import (
"log/slog"
"net" "net"
"net/http" "net/http"
"strings" "strings"
@ -14,8 +13,8 @@ import (
) )
func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) { func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
log := func() mlog.Log { log := func() *mlog.Log {
return pkglog.WithContext(r.Context()) return xlog.WithContext(r.Context())
} }
host := strings.ToLower(r.Host) host := strings.ToLower(r.Host)
@ -31,7 +30,7 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
} }
domain, err := dns.ParseDomain(host) domain, err := dns.ParseDomain(host)
if err != nil { if err != nil {
log().Errorx("mtasts policy request: bad domain", err, slog.String("host", host)) log().Errorx("mtasts policy request: bad domain", err, mlog.Field("host", host))
http.NotFound(w, r) http.NotFound(w, r)
return return
} }
@ -43,16 +42,16 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
return return
} }
var mxs []mtasts.MX var mxs []mtasts.STSMX
for _, s := range sts.MX { for _, s := range sts.MX {
var mx mtasts.MX var mx mtasts.STSMX
if strings.HasPrefix(s, "*.") { if strings.HasPrefix(s, "*.") {
mx.Wildcard = true mx.Wildcard = true
s = s[2:] s = s[2:]
} }
d, err := dns.ParseDomain(s) d, err := dns.ParseDomain(s)
if err != nil { if err != nil {
log().Errorx("bad domain in mtasts config", err, slog.String("domain", s)) log().Errorx("bad domain in mtasts config", err, mlog.Field("domain", s))
http.Error(w, "500 - internal server error - invalid domain in configuration", http.StatusInternalServerError) http.Error(w, "500 - internal server error - invalid domain in configuration", http.StatusInternalServerError)
return return
} }
@ -60,7 +59,7 @@ func mtastsPolicyHandle(w http.ResponseWriter, r *http.Request) {
mxs = append(mxs, mx) mxs = append(mxs, mx)
} }
if len(mxs) == 0 { if len(mxs) == 0 {
mxs = []mtasts.MX{{Domain: mox.Conf.Static.HostnameDomain}} mxs = []mtasts.STSMX{{Domain: mox.Conf.Static.HostnameDomain}}
} }
policy := mtasts.Policy{ policy := mtasts.Policy{

File diff suppressed because it is too large.

View File

@ -6,19 +6,33 @@ import (
"net/http/httptest" "net/http/httptest"
"os" "os"
"path/filepath" "path/filepath"
"strings"
"testing" "testing"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
) )
func TestServeHTTP(t *testing.T) { func TestServeHTTP(t *testing.T) {
os.RemoveAll("../testdata/web/data") os.RemoveAll("../testdata/web/data")
mox.ConfigStaticPath = filepath.FromSlash("../testdata/web/mox.conf") mox.ConfigStaticPath = "../testdata/web/mox.conf"
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf") mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
mox.MustLoadConfig(true, false) mox.MustLoadConfig(false)
portSrvs := portServes("local", mox.Conf.Static.Listeners["local"]) srv := &serve{
srv := portSrvs[80] PathHandlers: []pathHandler{
{
HostMatch: func(dom dns.Domain) bool {
return strings.HasPrefix(dom.ASCII, "mta-sts.")
},
Path: "/.well-known/mta-sts.txt",
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("mta-sts!"))
}),
},
},
Webserver: true,
}
test := func(method, target string, expCode int, expContent string, expHeaders map[string]string) { test := func(method, target string, expCode int, expContent string, expHeaders map[string]string) {
t.Helper() t.Helper()
@ -29,22 +43,22 @@ func TestServeHTTP(t *testing.T) {
srv.ServeHTTP(rw, req) srv.ServeHTTP(rw, req)
resp := rw.Result() resp := rw.Result()
if resp.StatusCode != expCode { if resp.StatusCode != expCode {
t.Errorf("got statuscode %d, expected %d", resp.StatusCode, expCode) t.Fatalf("got statuscode %d, expected %d", resp.StatusCode, expCode)
} }
if expContent != "" { if expContent != "" {
s := rw.Body.String() s := rw.Body.String()
if s != expContent { if s != expContent {
t.Errorf("got response data %q, expected %q", s, expContent) t.Fatalf("got response data %q, expected %q", s, expContent)
} }
} }
for k, v := range expHeaders { for k, v := range expHeaders {
if xv := resp.Header.Get(k); xv != v { if xv := resp.Header.Get(k); xv != v {
t.Errorf("got %q for header %q, expected %q", xv, k, v) t.Fatalf("got %q for header %q, expected %q", xv, k, v)
} }
} }
} }
test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "version: STSv1\nmode: enforce\nmax_age: 86400\nmx: mox.example\n", nil) test("GET", "http://mta-sts.mox.example/.well-known/mta-sts.txt", http.StatusOK, "mta-sts!", nil)
test("GET", "http://mox.example/.well-known/mta-sts.txt", http.StatusNotFound, "", nil) // mta-sts endpoint not in this domain. test("GET", "http://mox.example/.well-known/mta-sts.txt", http.StatusNotFound, "", nil) // mta-sts endpoint not in this domain.
test("GET", "http://mta-sts.mox.example/static/", http.StatusNotFound, "", nil) // static not served on this domain. test("GET", "http://mta-sts.mox.example/static/", http.StatusNotFound, "", nil) // static not served on this domain.
test("GET", "http://mta-sts.mox.example/other", http.StatusNotFound, "", nil) test("GET", "http://mta-sts.mox.example/other", http.StatusNotFound, "", nil)
@ -52,24 +66,4 @@ func TestServeHTTP(t *testing.T) {
test("GET", "http://mox.example/static/index.html", http.StatusOK, "html\n", map[string]string{"X-Test": "mox"}) test("GET", "http://mox.example/static/index.html", http.StatusOK, "html\n", map[string]string{"X-Test": "mox"})
test("GET", "http://mox.example/static/dir/", http.StatusOK, "", map[string]string{"X-Test": "mox"}) // Dir listing. test("GET", "http://mox.example/static/dir/", http.StatusOK, "", map[string]string{"X-Test": "mox"}) // Dir listing.
test("GET", "http://mox.example/other", http.StatusNotFound, "", nil) test("GET", "http://mox.example/other", http.StatusNotFound, "", nil)
// Webmail on IP, localhost, mail host, clientsettingsdomain, not others.
test("GET", "http://127.0.0.1/webmail/", http.StatusOK, "", nil)
test("GET", "http://localhost/webmail/", http.StatusOK, "", nil)
test("GET", "http://mox.example/webmail/", http.StatusOK, "", nil)
test("GET", "http://mail.mox.example/webmail/", http.StatusOK, "", nil)
test("GET", "http://mail.other.example/webmail/", http.StatusNotFound, "", nil)
test("GET", "http://remotehost/webmail/", http.StatusNotFound, "", nil)
// admin on IP, localhost, mail host, not clientsettingsdomain.
test("GET", "http://127.0.0.1/admin/", http.StatusOK, "", nil)
test("GET", "http://localhost/admin/", http.StatusOK, "", nil)
test("GET", "http://mox.example/admin/", http.StatusPermanentRedirect, "", nil) // Override by WebHandler.
test("GET", "http://mail.mox.example/admin/", http.StatusNotFound, "", nil)
// account is off.
test("GET", "http://127.0.0.1/", http.StatusNotFound, "", nil)
test("GET", "http://localhost/", http.StatusNotFound, "", nil)
test("GET", "http://mox.example/", http.StatusNotFound, "", nil)
test("GET", "http://mail.mox.example/", http.StatusNotFound, "", nil)
} }

View File

@ -11,9 +11,7 @@ import (
"fmt" "fmt"
htmltemplate "html/template" htmltemplate "html/template"
"io" "io"
"io/fs"
golog "log" golog "log"
"log/slog"
"net" "net"
"net/http" "net/http"
"net/http/httputil" "net/http/httputil"
@ -30,6 +28,7 @@ import (
"github.com/mjl-/mox/dns" "github.com/mjl-/mox/dns"
"github.com/mjl-/mox/mlog" "github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
) )
func recvid(r *http.Request) string { func recvid(r *http.Request) string {
@ -45,13 +44,11 @@ func recvid(r *http.Request) string {
// WebHandle runs after the built-in handlers for mta-sts, autoconfig, etc. // WebHandle runs after the built-in handlers for mta-sts, autoconfig, etc.
// If no handler matched, false is returned. // If no handler matched, false is returned.
// WebHandle sets w.Name to that of the matching handler. // WebHandle sets w.Name to that of the matching handler.
func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bool) { func WebHandle(w *loggingWriter, r *http.Request, host dns.Domain) (handled bool) {
conf := mox.Conf.DynamicConfig() redirects, handlers := mox.Conf.WebServer()
redirects := conf.WebDNSDomainRedirects
handlers := conf.WebHandlers
for from, to := range redirects { for from, to := range redirects {
if host.Domain != from { if host != from {
continue continue
} }
u := r.URL u := r.URL
@ -63,7 +60,7 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bo
} }
for _, h := range handlers { for _, h := range handlers {
if host.Domain != h.DNSDomain { if host != h.DNSDomain {
continue continue
} }
loc := h.Path.FindStringIndex(r.URL.Path) loc := h.Path.FindStringIndex(r.URL.Path)
@ -79,14 +76,11 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bo
u.Scheme = "https" u.Scheme = "https"
u.Host = h.DNSDomain.Name() u.Host = h.DNSDomain.Name()
w.Handler = h.Name w.Handler = h.Name
w.Compress = h.Compress
http.Redirect(w, r, u.String(), http.StatusPermanentRedirect) http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
return true return true
} }
// We don't want the loggingWriter to override the static handler's decisions to compress. if h.WebStatic != nil && HandleStatic(h.WebStatic, w, r) {
w.Compress = h.Compress
if h.WebStatic != nil && HandleStatic(h.WebStatic, h.Compress, w, r) {
w.Handler = h.Name w.Handler = h.Name
return true return true
} }
@ -98,12 +92,7 @@ func WebHandle(w *loggingWriter, r *http.Request, host dns.IPDomain) (handled bo
w.Handler = h.Name w.Handler = h.Name
return true return true
} }
if h.WebInternal != nil && HandleInternal(h.WebInternal, w, r) {
w.Handler = h.Name
return true
}
} }
w.Compress = false
return false return false
} }
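
The doc comment above describes matching a request against configured domain redirects and per-domain handlers. A minimal, simplified sketch of that dispatch idea, with hypothetical types rather than mox's config structures:

```go
package main

import (
	"log"
	"net/http"
	"regexp"
	"strings"
)

type webHandler struct {
	domain string         // lower-cased host this handler applies to
	path   *regexp.Regexp // matched against r.URL.Path
	serve  http.Handler
}

// dispatch reports whether any handler matched and served the request.
func dispatch(handlers []webHandler, w http.ResponseWriter, r *http.Request) bool {
	host := strings.ToLower(r.Host)
	for _, h := range handlers {
		if host != h.domain || !h.path.MatchString(r.URL.Path) {
			continue
		}
		h.serve.ServeHTTP(w, r)
		return true
	}
	return false
}

func main() {
	handlers := []webHandler{{
		domain: "mox.example",
		path:   regexp.MustCompile("^/static/"),
		serve:  http.StripPrefix("/static/", http.FileServer(http.Dir("web/static"))),
	}}
	log.Fatal(http.ListenAndServe("localhost:8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !dispatch(handlers, w, r) {
			http.NotFound(w, r)
		}
	})))
}
```
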
@ -154,9 +143,9 @@ table > tbody > tr:nth-child(odd) { background-color: #f8f8f8; }
// slash is written. If a directory is requested and an index.html exists, that // slash is written. If a directory is requested and an index.html exists, that
// file is returned. Otherwise, for directories with ListFiles configured, a // file is returned. Otherwise, for directories with ListFiles configured, a
// directory listing is returned. // directory listing is returned.
func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *http.Request) (handled bool) { func HandleStatic(h *config.WebStatic, w http.ResponseWriter, r *http.Request) (handled bool) {
log := func() mlog.Log { log := func() *mlog.Log {
return pkglog.WithContext(r.Context()) return xlog.WithContext(r.Context())
} }
if r.Method != "GET" && r.Method != "HEAD" { if r.Method != "GET" && r.Method != "HEAD" {
if h.ContinueNotFound { if h.ContinueNotFound {
@ -185,24 +174,13 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
// fspath will not have a trailing slash anymore, we'll correct for it // fspath will not have a trailing slash anymore, we'll correct for it
// later when the path turns out to be file instead of a directory. // later when the path turns out to be file instead of a directory.
serveFile := func(name string, fi fs.FileInfo, content *os.File) { serveFile := func(name string, mtime time.Time, content *os.File) {
// ServeContent only sets a content-type if not already present in the response headers. // ServeContent only sets a content-type if not already present in the response headers.
hdr := w.Header() hdr := w.Header()
for k, v := range h.ResponseHeaders { for k, v := range h.ResponseHeaders {
hdr.Add(k, v) hdr.Add(k, v)
} }
// We transparently compress here, but still use ServeContent, because it handles http.ServeContent(w, r, name, mtime, content)
// conditional requests, range requests. It's a bit of a hack, but on first write
// to staticgzcacheReplacer where we are compressing, we write the full compressed
// file instead, and return an error to ServeContent so it stops. We still have all
// the useful behaviour (status code and headers) from ServeContent.
xw := w
if compress && acceptsGzip(r) && compressibleContent(content) {
xw = &staticgzcacheReplacer{w, r, content.Name(), content, fi.ModTime(), fi.Size(), 0, false}
} else {
w.(*loggingWriter).Compress = false
}
http.ServeContent(xw, r, name, fi.ModTime(), content)
} }
f, err := os.Open(fspath) f, err := os.Open(fspath)
@ -214,45 +192,35 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
} }
http.NotFound(w, r) http.NotFound(w, r)
return true return true
} else if errors.Is(err, syscall.ENAMETOOLONG) {
http.NotFound(w, r)
return true
} else if os.IsPermission(err) { } else if os.IsPermission(err) {
// If we tried opening a directory, we may not have permission to read it, but // If we tried opening a directory, we may not have permission to read it, but
// still access files inside it (execute bit), such as index.html. So try to serve it. // still access files inside it (execute bit), such as index.html. So try to serve it.
index, err := os.Open(filepath.Join(fspath, "index.html")) index, err := os.Open(filepath.Join(fspath, "index.html"))
if err != nil { if err == nil {
http.Error(w, "403 - permission denied", http.StatusForbidden) defer index.Close()
var ifi os.FileInfo
ifi, err = index.Stat()
if err != nil {
log().Errorx("stat index.html in directory we cannot list", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
serveFile("index.html", ifi.ModTime(), index)
return true return true
} }
defer func() { http.Error(w, "403 - permission denied", http.StatusForbidden)
err := index.Close()
log().Check(err, "closing index file for serving")
}()
var ifi os.FileInfo
ifi, err = index.Stat()
if err != nil {
log().Errorx("stat index.html in directory we cannot list", err, slog.Any("url", r.URL), slog.String("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
serveFile("index.html", ifi, index)
return true return true
} }
log().Errorx("open file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath)) log().Errorx("open file for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError) http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true return true
} }
defer func() { defer f.Close()
if err := f.Close(); err != nil {
log().Check(err, "closing file for static file serving")
}
}()
fi, err := f.Stat() fi, err := f.Stat()
if err != nil { if err != nil {
log().Errorx("stat file for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath)) log().Errorx("stat file for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError) http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true return true
} }
@ -280,22 +248,17 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
http.Error(w, "403 - permission denied", http.StatusForbidden) http.Error(w, "403 - permission denied", http.StatusForbidden)
return true return true
} else if err == nil { } else if err == nil {
defer func() { defer index.Close()
if err := index.Close(); err != nil {
log().Check(err, "closing index file for serving")
}
}()
var ifi os.FileInfo var ifi os.FileInfo
ifi, err = index.Stat() ifi, err = index.Stat()
if err == nil { if err == nil {
w.Header().Set("Content-Type", "text/html; charset=utf-8") w.Header().Set("Content-Type", "text/html; charset=utf-8")
serveFile("index.html", ifi, index) serveFile("index.html", ifi.ModTime(), index)
return true return true
} }
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
log().Errorx("stat for static file serving", err, slog.Any("url", r.URL), slog.String("fspath", fspath)) log().Errorx("stat for static file serving", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError) http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true return true
} }
@ -336,7 +299,7 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
if err == io.EOF { if err == io.EOF {
break break
} else if err != nil { } else if err != nil {
log().Errorx("reading directory for file listing", err, slog.Any("url", r.URL), slog.String("fspath", fspath)) log().Errorx("reading directory for file listing", err, mlog.Field("url", r.URL), mlog.Field("fspath", fspath))
http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError) http.Error(w, "500 - internal server error"+recvid(r), http.StatusInternalServerError)
return true return true
} }
@ -352,13 +315,13 @@ func HandleStatic(h *config.WebStatic, compress bool, w http.ResponseWriter, r *
} }
} }
err = lsTemplate.Execute(w, map[string]any{"Files": files}) err = lsTemplate.Execute(w, map[string]any{"Files": files})
if err != nil { if err != nil && !moxio.IsClosed(err) {
log().Check(err, "executing directory listing template") log().Errorx("executing directory listing template", err)
} }
return true return true
} }
serveFile(fspath, fi, f) serveFile(fspath, fi.ModTime(), f)
return true return true
} }
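
As a compact restatement of the directory behaviour described above (index.html first, otherwise a listing only when enabled, otherwise 403), here is a simplified standalone sketch; mox's real handler additionally deals with permissions, response headers, compression, and its own listing template:

```go
package main

import (
	"log"
	"net/http"
	"os"
	"path/filepath"
)

// serveDir mirrors the decision tree above: prefer index.html, otherwise a
// listing only when enabled, otherwise forbidden.
func serveDir(w http.ResponseWriter, r *http.Request, fspath string, listFiles bool) {
	if f, err := os.Open(filepath.Join(fspath, "index.html")); err == nil {
		defer f.Close()
		if fi, err := f.Stat(); err == nil {
			w.Header().Set("Content-Type", "text/html; charset=utf-8")
			http.ServeContent(w, r, "index.html", fi.ModTime(), f)
			return
		}
	}
	if !listFiles {
		http.Error(w, "403 - permission denied", http.StatusForbidden)
		return
	}
	// Stand-in for mox's listing template: net/http's built-in directory listing.
	http.ServeFile(w, r, fspath)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		serveDir(w, r, "web/static", true)
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```
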
@ -414,19 +377,13 @@ func HandleRedirect(h *config.WebRedirect, w http.ResponseWriter, r *http.Reques
return true return true
} }
// HandleInternal passes the request to an internal service.
func HandleInternal(h *config.WebInternal, w http.ResponseWriter, r *http.Request) (handled bool) {
h.Handler.ServeHTTP(w, r)
return true
}
// HandleForward handles a request by forwarding it to another webserver and // HandleForward handles a request by forwarding it to another webserver and
// passing the response on. I.e. a reverse proxy. It handles websocket // passing the response on. I.e. a reverse proxy. It handles websocket
// connections by monitoring the websocket handshake and then just passing along the // connections by monitoring the websocket handshake and then just passing along the
// websocket frames. // websocket frames.
func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) { func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) {
log := func() mlog.Log { log := func() *mlog.Log {
return pkglog.WithContext(r.Context()) return xlog.WithContext(r.Context())
} }
xr := *r xr := *r
@ -486,13 +443,13 @@ func HandleForward(h *config.WebForward, w http.ResponseWriter, r *http.Request,
// ReverseProxy will append any remaining path to the configured target URL. // ReverseProxy will append any remaining path to the configured target URL.
proxy := httputil.NewSingleHostReverseProxy(h.TargetURL) proxy := httputil.NewSingleHostReverseProxy(h.TargetURL)
proxy.FlushInterval = time.Duration(-1) // Flush after each write. proxy.FlushInterval = time.Duration(-1) // Flush after each write.
proxy.ErrorLog = golog.New(mlog.LogWriter(mlog.New("net/http/httputil", nil).WithContext(r.Context()), mlog.LevelDebug, "reverseproxy error"), "", 0) proxy.ErrorLog = golog.New(mlog.ErrWriter(mlog.New("net/http/httputil").WithContext(r.Context()), mlog.LevelDebug, "reverseproxy error"), "", 0)
proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) { proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
log().Debugx("forwarding request to backend webserver", err, slog.Any("url", r.URL)) log().Debugx("forwarding request to backend webserver", err, mlog.Field("url", r.URL))
return return
} }
log().Errorx("forwarding request to backend webserver", err, slog.Any("url", r.URL)) log().Errorx("forwarding request to backend webserver", err, mlog.Field("url", r.URL))
if os.IsTimeout(err) { if os.IsTimeout(err) {
http.Error(w, "504 - gateway timeout"+recvid(r), http.StatusGatewayTimeout) http.Error(w, "504 - gateway timeout"+recvid(r), http.StatusGatewayTimeout)
} else { } else {
@ -520,8 +477,8 @@ var errNotImplemented = errors.New("functionality not yet implemented")
// work for little benefit. Besides, the whole point of websockets is to exchange // work for little benefit. Besides, the whole point of websockets is to exchange
// bytes without HTTP being in the way, so let's do that. // bytes without HTTP being in the way, so let's do that.
func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) { func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Request, path string) (handled bool) {
log := func() mlog.Log { log := func() *mlog.Log {
return pkglog.WithContext(r.Context()) return xlog.WithContext(r.Context())
} }
lw := w.(*loggingWriter) lw := w.(*loggingWriter)
@ -606,9 +563,7 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
} }
defer func() { defer func() {
if beconn != nil { if beconn != nil {
if err := beconn.Close(); err != nil { beconn.Close()
log().Check(err, "closing backend websocket connection")
}
} }
}() }()
@ -624,9 +579,7 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
} }
defer func() { defer func() {
if cconn != nil { if cconn != nil {
if err := cconn.Close(); err != nil { cconn.Close()
log().Check(err, "closing client websocket connection")
}
} }
}() }()
@ -679,22 +632,18 @@ func forwardWebsocket(h *config.WebForward, w http.ResponseWriter, r *http.Reque
// connection whose closing was already announced with a websocket frame. // connection whose closing was already announced with a websocket frame.
lw.error(<-errc) lw.error(<-errc)
// Close connections so other goroutine stops as well. // Close connections so other goroutine stops as well.
if err := cconn.Close(); err != nil { cconn.Close()
log().Check(err, "closing client websocket connection") beconn.Close()
} cconn = nil
if err := beconn.Close(); err != nil {
log().Check(err, "closing backend websocket connection")
}
// Wait for goroutine so it has updated the logWriter.Size*Client fields before we // Wait for goroutine so it has updated the logWriter.Size*Client fields before we
// continue with logging. // continue with logging.
<-errc <-errc
cconn = nil
return true return true
} }
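
The comments around forwardWebsocket describe the core idea: once the handshake has been relayed, exchange raw bytes without HTTP in the way until either side ends, then tear both connections down. A small self-contained sketch of that building block (an assumed simplification, not mox's actual code; net.Pipe is used only to exercise it):

```go
package main

import (
	"io"
	"net"
)

// proxy copies bytes both ways and closes both connections once either
// direction finishes, then waits for the other copier before returning.
func proxy(client, backend net.Conn) {
	errc := make(chan error, 2)
	go func() { _, err := io.Copy(backend, client); errc <- err }()
	go func() { _, err := io.Copy(client, backend); errc <- err }()
	<-errc // first direction done (EOF or error)
	client.Close()
	backend.Close()
	<-errc // let the other goroutine finish as well
}

func main() {
	c1, c2 := net.Pipe() // "client" side
	b1, b2 := net.Pipe() // "backend" side
	go proxy(c2, b1)
	go func() { b2.Write([]byte("hi")); b2.Close() }()
	buf := make([]byte, 2)
	io.ReadFull(c1, buf) // receives "hi" via the proxy
	c1.Close()
}
```
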
func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request) (rresp *http.Response, rconn net.Conn, rerr error) { func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request) (rresp *http.Response, rconn net.Conn, rerr error) {
log := func() mlog.Log { log := func() *mlog.Log {
return pkglog.WithContext(r.Context()) return xlog.WithContext(r.Context())
} }
// Dial the backend, possibly doing TLS. We assume the net/http DefaultTransport is // Dial the backend, possibly doing TLS. We assume the net/http DefaultTransport is
@ -737,9 +686,7 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
} }
defer func() { defer func() {
if rerr != nil { if rerr != nil {
if xerr := conn.Close(); xerr != nil { conn.Close()
log().Check(xerr, "cleaning up websocket connection")
}
} }
}() }()
@ -766,9 +713,7 @@ func websocketTransact(ctx context.Context, targetURL *url.URL, r *http.Request)
} }
defer func() { defer func() {
if rerr != nil { if rerr != nil {
if xerr := resp.Body.Close(); xerr != nil { resp.Body.Close()
log().Check(xerr, "closing response body after error")
}
} }
}() }()
if err := conn.SetDeadline(time.Time{}); err != nil { if err := conn.SetDeadline(time.Time{}); err != nil {

View File

@ -18,20 +18,11 @@ import (
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
) )
func tcheck(t *testing.T, err error, msg string) {
t.Helper()
if err != nil {
t.Fatalf("%s: %s", msg, err)
}
}
func TestWebserver(t *testing.T) { func TestWebserver(t *testing.T) {
os.RemoveAll("../testdata/webserver/data") os.RemoveAll("../testdata/webserver/data")
mox.ConfigStaticPath = filepath.FromSlash("../testdata/webserver/mox.conf") mox.ConfigStaticPath = "../testdata/webserver/mox.conf"
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf") mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
mox.MustLoadConfig(true, false) mox.MustLoadConfig(false)
loadStaticGzipCache(mox.DataDirPath("tmp/httpstaticcompresscache"), 1024*1024)
srv := &serve{Webserver: true} srv := &serve{Webserver: true}
@ -68,12 +59,10 @@ func TestWebserver(t *testing.T) {
test("GET", "http://schemeredir.example", nil, http.StatusPermanentRedirect, "", map[string]string{"Location": "https://schemeredir.example/"}) test("GET", "http://schemeredir.example", nil, http.StatusPermanentRedirect, "", map[string]string{"Location": "https://schemeredir.example/"})
test("GET", "https://schemeredir.example", nil, http.StatusNotFound, "", nil) test("GET", "https://schemeredir.example", nil, http.StatusNotFound, "", nil)
accgzip := map[string]string{"Accept-Encoding": "gzip"} test("GET", "http://mox.example/static/", nil, http.StatusOK, "", map[string]string{"X-Test": "mox"}) // index.html
test("GET", "http://mox.example/static/", accgzip, http.StatusOK, "", map[string]string{"X-Test": "mox", "Content-Encoding": "gzip"}) // index.html test("GET", "http://mox.example/static/dir/", nil, http.StatusOK, "", map[string]string{"X-Test": "mox"}) // listing
test("GET", "http://mox.example/static/dir/hi.txt", accgzip, http.StatusOK, "", map[string]string{"X-Test": "mox", "Content-Encoding": ""}) // too small to compress test("GET", "http://mox.example/static/dir", nil, http.StatusTemporaryRedirect, "", map[string]string{"Location": "/static/dir/"}) // redirect to dir
test("GET", "http://mox.example/static/dir/", accgzip, http.StatusOK, "", map[string]string{"X-Test": "mox", "Content-Encoding": "gzip"}) // listing test("GET", "http://mox.example/static/bogus", nil, http.StatusNotFound, "", nil)
test("GET", "http://mox.example/static/dir", accgzip, http.StatusTemporaryRedirect, "", map[string]string{"Location": "/static/dir/"}) // redirect to dir
test("GET", "http://mox.example/static/bogus", accgzip, http.StatusNotFound, "", map[string]string{"Content-Encoding": ""})
test("GET", "http://mox.example/nolist/", nil, http.StatusOK, "", nil) // index.html test("GET", "http://mox.example/nolist/", nil, http.StatusOK, "", nil) // index.html
test("GET", "http://mox.example/nolist/dir/", nil, http.StatusForbidden, "", nil) // no listing test("GET", "http://mox.example/nolist/dir/", nil, http.StatusForbidden, "", nil) // no listing
@ -134,37 +123,13 @@ func TestWebserver(t *testing.T) {
test("GET", "http://mox.example/bogus", nil, http.StatusNotFound, "", nil) // path not registered. test("GET", "http://mox.example/bogus", nil, http.StatusNotFound, "", nil) // path not registered.
test("GET", "http://bogus.mox.example/static/", nil, http.StatusNotFound, "", nil) // domain not registered. test("GET", "http://bogus.mox.example/static/", nil, http.StatusNotFound, "", nil) // domain not registered.
test("GET", "http://mox.example/xadmin/", nil, http.StatusOK, "", nil) // internal admin service
test("GET", "http://mox.example/xaccount/", nil, http.StatusOK, "", nil) // internal account service
test("GET", "http://mox.example/xwebmail/", nil, http.StatusOK, "", nil) // internal webmail service
test("GET", "http://mox.example/xwebapi/v0/", nil, http.StatusOK, "", nil) // internal webapi service
npaths := len(staticgzcache.paths)
if npaths != 1 {
t.Fatalf("%d file(s) in staticgzcache, expected 1", npaths)
}
loadStaticGzipCache(mox.DataDirPath("tmp/httpstaticcompresscache"), 1024*1024)
npaths = len(staticgzcache.paths)
if npaths != 1 {
t.Fatalf("%d file(s) in staticgzcache after loading from disk, expected 1", npaths)
}
loadStaticGzipCache(mox.DataDirPath("tmp/httpstaticcompresscache"), 0)
npaths = len(staticgzcache.paths)
if npaths != 0 {
t.Fatalf("%d file(s) in staticgzcache after setting max size to 0, expected 0", npaths)
}
loadStaticGzipCache(mox.DataDirPath("tmp/httpstaticcompresscache"), 0)
npaths = len(staticgzcache.paths)
if npaths != 0 {
t.Fatalf("%d file(s) in staticgzcache after setting max size to 0 and reloading from disk, expected 0", npaths)
}
} }
func TestWebsocket(t *testing.T) { func TestWebsocket(t *testing.T) {
os.RemoveAll("../testdata/websocket/data") os.RemoveAll("../testdata/websocket/data")
mox.ConfigStaticPath = filepath.FromSlash("../testdata/websocket/mox.conf") mox.ConfigStaticPath = "../testdata/websocket/mox.conf"
mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf") mox.ConfigDynamicPath = filepath.Join(filepath.Dir(mox.ConfigStaticPath), "domains.conf")
mox.MustLoadConfig(true, false) mox.MustLoadConfig(false)
srv := &serve{Webserver: true} srv := &serve{Webserver: true}
@ -339,4 +304,5 @@ func TestWebsocket(t *testing.T) {
w.WriteHeader(http.StatusSwitchingProtocols) w.WriteHeader(http.StatusSwitchingProtocols)
}) })
test("GET", wsreqhdrs, http.StatusSwitchingProtocols, wsresphdrs) test("GET", wsreqhdrs, http.StatusSwitchingProtocols, wsresphdrs)
} }

Some files were not shown because too many files have changed in this diff.