add a webapi and webhooks for a simple http/json-based api

for applications to compose/send messages, receive delivery feedback, and
maintain suppression lists.

this is an alternative to applications using a library to compose messages,
submitting those messages using smtp, and monitoring a mailbox with imap for
DSNs, which can be processed into the equivalent of suppression lists. but you
need to know about all these standards/protocols and find libraries for them.
by using the webapi & webhooks, you just need an http & json library.
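
as a rough illustration, sending a message through the webapi could look like
the sketch below. the method name, request fields and the "request" form-field
encoding are assumptions for illustration only; see the webapi documentation
for the authoritative interface.

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
        "strings"
    )

    func main() {
        // JSON request for a hypothetical "Send" method; field names are
        // illustrative, not authoritative.
        reqJSON := `{"To": [{"Address": "other@remote.example"}], "Subject": "hi", "Text": "hello"}`

        form := url.Values{"request": {reqJSON}}
        req, err := http.NewRequest("POST", "https://mail.mox.example/webapi/v0/Send", strings.NewReader(form.Encode()))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        // Authenticate with a (login) email address and password.
        req.SetBasicAuth("mjl@mox.example", "password")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }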

unfortunately, there is no standard for this kind of api, so mox has made up
yet another one...

matching incoming DSNs about deliveries to the original outgoing messages
requires keeping a history of "retired" messages (removed from the queue,
whether delivered successfully or permanently failed). this can be enabled per
account. the history is also useful for debugging deliveries. we now also keep
a history of each delivery attempt, accessible while the message is still in
the queue and kept when the message is retired. the queue webadmin pages now
also have pagination, to handle the potentially large history.
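
keeping retired messages (and retired webhook calls, see below) is controlled
with per-account retention periods. a rough sketch of the account settings in
domains.conf; KeepRetiredWebhookPeriod appears in the code of this commit, the
other field name is an assumption here:

    Accounts:
        mjl:
            KeepRetiredMessagePeriod: 72h     # assumed name; how long to keep retired messages.
            KeepRetiredWebhookPeriod: 72h     # how long to keep retired webhook calls.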

a queue of webhook calls is now managed too. failures are retried, similar to
message deliveries. webhooks can also be saved to the retired list after
completing. this too is configurable per account.
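
a webhook delivery is a plain http post to the url from the new per-account
OutgoingWebhook config (URL, Authorization, Events). based on the tests added
in this commit, a call for an outgoing-delivery event looks roughly like this;
the json body shows only a subset of fields and the event string and values
are illustrative:

    POST /hooks/mox HTTP/1.1
    Host: app.example.org
    Content-Type: application/json
    Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
    X-Mox-Webhook-ID: 123
    X-Mox-Webhook-Attempt: 1

    {
        "Event": "failed",
        "DSN": true,
        "FromID": "unique",
        "SMTPCode": 554,
        "SMTPEnhancedCode": "5.0.0",
        "QueueMsgID": 123
    }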

messages can be sent with a "unique smtp mail from" address. this can only be
used if the domain is configured with a localpart catchall separator such as
"+". when enabled, a queued message gets assigned a random "fromid", which is
added after the separator when sending. when DSNs are returned, they can be
related to previously sent messages based on this fromid. in the future, we can
implement matching on the "envid" used in the smtp dsn extension, or on the
"message-id" of the message. use of a fromid can be triggered by
authenticating with a login email address that is configured to enable it.
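
for example, with separator "+" and a generated fromid, the envelope sender of
an outgoing message could look like this (the fromid value is made up for
illustration):

    MAIL FROM:<mjl+8yKcA3WFqkDn@mox.example>

a DSN bouncing back to that address can then be matched to the original queued
(retired) message through the fromid.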

suppression lists are automatically managed per account. if a delivery attempt
results in certain smtp errors, the destination address is added to the
suppression list. future messages queued for that recipient will immediately
fail without a delivery attempt. suppression lists protect your mail server's
reputation.
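
a minimal sketch of the new suppression functions in the queue package
(mox-internal; applications would normally manage suppressions through the
webapi, and mox adds entries automatically as described above):

    package example

    import (
        "context"

        "github.com/mjl-/mox/queue"
        "github.com/mjl-/mox/smtp"
        "github.com/mjl-/mox/webapi"
    )

    func addSuppression(ctx context.Context) error {
        addr, err := smtp.ParseAddress("user@remote.example")
        if err != nil {
            return err
        }
        // Add a manual suppression for account "mjl". The base address
        // (localpart lowercased, dots removed, +/- suffix stripped) is
        // derived from the original address.
        sup := webapi.Suppression{Account: "mjl", Reason: "manual block"}
        if err := queue.SuppressionAdd(ctx, addr.Path(), &sup); err != nil {
            return err
        }
        // List all suppressions for the account.
        _, err = queue.SuppressionList(ctx, "mjl")
        return err
    }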

submitted messages can carry "extra" data through the queue and into webhooks
for outgoing deliveries. with the webapi it is passed as a json object, with
smtp submission as message headers of the form "x-mox-extra-<key>: value".
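
for example, a message submitted over smtp could include a header like the one
below (key and value made up for illustration; how the key ends up in the json
"extra" object is defined by the implementation), and the extra data is then
included in webhook calls about that message:

    X-Mox-Extra-UserID: 12345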

to make it easy to test webapi/webhooks locally, the "localserve" mode now
actually puts messages in the queue. when it's time to deliver, it still won't
do a real delivery attempt, but just delivers to the sender account. unless
the recipient address has a special form, in which case a delivery failure is
simulated.

admins now have more control over the queue. "hold rules" can be added to mark
newly queued messages as "on hold", pausing delivery. rules can match specific
sender or recipient domains/addresses, or apply to all messages, pausing the
entire queue. this is also useful for (local) testing.

new config options have been introduced. they are editable through the admin
and/or account web interfaces.

the webapi http endpoints are enabled for newly generated configs with the
quickstart, and in localserve. existing configurations must explicitly enable
the webapi in mox.conf.
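
for existing setups that means adding something like the following to a
listener in mox.conf; the field name is an assumption here, check the example
config/documentation for the exact form:

    Listeners:
        public:
            WebAPIHTTPS:
                Enabled: true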

gopherwatch.org was created to dogfood this code. it initially used just the
compose/smtpclient/imapclient mox packages to send messages and process
delivery feedback. it will get a config option to use the mox webapi/webhooks
instead. the gopherwatch code that uses the webapi/webhooks is smaller and
simpler, and developing it shaped the development of the mox webapi/webhooks.

for issue #31 by cuu508
Mechiel Lukkien
2024-04-15 21:49:02 +02:00
parent 8bec5ef7d4
commit 09fcc49223
87 changed files with 15556 additions and 1306 deletions


@ -30,6 +30,7 @@ import (
"github.com/mjl-/mox/smtpclient"
"github.com/mjl-/mox/store"
"github.com/mjl-/mox/tlsrpt"
"github.com/mjl-/mox/webhook"
)
// Increased each time an outgoing connection is made for direct delivery. Used by
@ -155,7 +156,7 @@ func deliverDirect(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
if permanent {
err = smtpclient.Error{Permanent: true, Err: err}
}
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, err)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, err)
return
}
@ -175,7 +176,7 @@ func deliverDirect(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
} else {
qlog.Infox("mtasts lookup temporary error, aborting delivery attempt", err, slog.Any("domain", origNextHop))
recipientDomainResult.Summary.TotalFailureSessionCount++
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, err)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, err)
return
}
}
@ -298,19 +299,39 @@ func deliverDirect(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
continue
}
delIDs := make([]int64, len(result.delivered))
delMsgs := make([]Msg, len(result.delivered))
for i, mr := range result.delivered {
mqlog := nqlog.With(slog.Int64("msgid", mr.msg.ID), slog.Any("recipient", mr.msg.Recipient()))
mqlog.Info("delivered from queue")
delIDs[i] = mr.msg.ID
mr.msg.markResult(0, "", "", true)
delMsgs[i] = *mr.msg
}
if len(delIDs) > 0 {
if err := queueDelete(context.Background(), delIDs...); err != nil {
nqlog.Errorx("deleting messages from queue after delivery", err)
if len(delMsgs) > 0 {
err := DB.Write(context.Background(), func(tx *bstore.Tx) error {
return retireMsgs(nqlog, tx, webhook.EventDelivered, 0, "", nil, delMsgs...)
})
if err != nil {
nqlog.Errorx("deleting messages from queue database after delivery", err)
} else if err := removeMsgsFS(nqlog, delMsgs...); err != nil {
nqlog.Errorx("removing queued messages from file system after delivery", err)
}
kick()
}
for _, mr := range result.failed {
fail(ctx, nqlog, []*Msg{mr.msg}, m0.DialedIPs, backoff, remoteMTA, smtpclient.Error(mr.resp))
if len(result.failed) > 0 {
err := DB.Write(context.Background(), func(tx *bstore.Tx) error {
for _, mr := range result.failed {
failMsgsTx(nqlog, tx, []*Msg{mr.msg}, m0.DialedIPs, backoff, remoteMTA, smtpclient.Error(mr.resp))
}
return nil
})
if err != nil {
for _, mr := range result.failed {
nqlog.Errorx("error processing delivery failure for messages", err,
slog.Int64("msgid", mr.msg.ID),
slog.Any("recipient", mr.msg.Recipient()))
}
}
kick()
}
return
}
@ -335,11 +356,11 @@ func deliverDirect(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
Secode: smtp.SePol7MissingReqTLS30,
Err: fmt.Errorf("destination servers do not support requiretls"),
}
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, remoteMTA, err)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, remoteMTA, err)
return
}
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, remoteMTA, lastErr)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, remoteMTA, lastErr)
return
}


@ -8,6 +8,7 @@ import (
"log/slog"
"net"
"os"
"slices"
"strings"
"time"
@ -24,6 +25,7 @@ import (
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/smtpclient"
"github.com/mjl-/mox/store"
"github.com/mjl-/mox/webhook"
)
var (
@ -35,8 +37,32 @@ var (
)
)
// todo: rename function, perhaps put some of the params in a delivery struct so we don't pass all the params all the time?
func fail(ctx context.Context, qlog mlog.Log, msgs []*Msg, dialedIPs map[string][]net.IP, backoff time.Duration, remoteMTA dsn.NameIP, err error) {
// failMsgsDB calls failMsgsTx with a new transaction, logging transaction errors.
func failMsgsDB(qlog mlog.Log, msgs []*Msg, dialedIPs map[string][]net.IP, backoff time.Duration, remoteMTA dsn.NameIP, err error) {
xerr := DB.Write(context.Background(), func(tx *bstore.Tx) error {
failMsgsTx(qlog, tx, msgs, dialedIPs, backoff, remoteMTA, err)
return nil
})
if xerr != nil {
for _, m := range msgs {
qlog.Errorx("error marking delivery as failed", xerr,
slog.String("delivererr", err.Error()),
slog.Int64("msgid", m.ID),
slog.Any("recipient", m.Recipient()),
slog.Duration("backoff", backoff),
slog.Time("nextattempt", m.NextAttempt))
}
}
kick()
}
// todo: perhaps put some of the params in a delivery struct so we don't pass all the params all the time?
// failMsgsTx processes a failure to deliver msgs. If the error is permanent, a DSN
// is delivered to the sender account.
// Caller must call kick() after commiting the transaction for any (re)scheduling
// of messages and webhooks.
func failMsgsTx(qlog mlog.Log, tx *bstore.Tx, msgs []*Msg, dialedIPs map[string][]net.IP, backoff time.Duration, remoteMTA dsn.NameIP, err error) {
// todo future: when we implement relaying, we should be able to send DSNs to non-local users. and possibly specify a null mailfrom. ../rfc/5321:1503
// todo future: when we implement relaying, and a dsn cannot be delivered, and requiretls was active, we cannot drop the message. instead deliver to local postmaster? though ../rfc/8689:383 may intend to say the dsn should be delivered without requiretls?
// todo future: when we implement smtp dsn extension, parameter RET=FULL must be disregarded for messages with REQUIRETLS. ../rfc/8689:379
@ -49,6 +75,7 @@ func fail(ctx context.Context, qlog mlog.Log, msgs []*Msg, dialedIPs map[string]
var errmsg = err.Error()
var code int
var secodeOpt string
var event webhook.OutgoingEvent
if errors.As(err, &cerr) {
if cerr.Line != "" {
smtpLines = append([]string{cerr.Line}, cerr.MoreLines...)
@ -69,22 +96,56 @@ func fail(ctx context.Context, qlog mlog.Log, msgs []*Msg, dialedIPs map[string]
}
if permanent || m0.MaxAttempts == 0 && m0.Attempts >= 8 || m0.MaxAttempts > 0 && m0.Attempts >= m0.MaxAttempts {
for _, m := range msgs {
qmlog := qlog.With(slog.Int64("msgid", m.ID), slog.Any("recipient", m.Recipient()))
qmlog.Errorx("permanent failure delivering from queue", err)
deliverDSNFailure(ctx, qmlog, *m, remoteMTA, secodeOpt, errmsg, smtpLines)
event = webhook.EventFailed
if errors.Is(err, errSuppressed) {
event = webhook.EventSuppressed
}
if err := queueDelete(context.Background(), ids...); err != nil {
qlog.Errorx("deleting messages from queue after permanent failure", err)
}
return
}
// All messages should have the same DialedIPs, so we can update them all at once.
qup := bstore.QueryDB[Msg](context.Background(), DB)
qup.FilterIDs(ids)
if _, xerr := qup.UpdateNonzero(Msg{LastError: errmsg, DialedIPs: dialedIPs}); err != nil {
qlog.Errorx("storing delivery error", xerr, slog.String("deliveryerror", errmsg))
rmsgs := make([]Msg, len(msgs))
var scl []suppressionCheck
for i, m := range msgs {
rm := *m
rm.DialedIPs = dialedIPs
rm.markResult(code, secodeOpt, errmsg, false)
qmlog := qlog.With(slog.Int64("msgid", rm.ID), slog.Any("recipient", m.Recipient()))
qmlog.Errorx("permanent failure delivering from queue", err)
deliverDSNFailure(qmlog, rm, remoteMTA, secodeOpt, errmsg, smtpLines)
rmsgs[i] = rm
// If this was an smtp error from remote, we'll pass the failure to the
// suppression list.
if code == 0 {
continue
}
sc := suppressionCheck{
MsgID: rm.ID,
Account: rm.SenderAccount,
Recipient: rm.Recipient(),
Code: code,
Secode: secodeOpt,
Source: "queue",
}
scl = append(scl, sc)
}
var suppressedMsgIDs []int64
if len(scl) > 0 {
var err error
suppressedMsgIDs, err = suppressionProcess(qlog, tx, scl...)
if err != nil {
qlog.Errorx("processing delivery failure in suppression list", err)
return
}
}
err := retireMsgs(qlog, tx, event, code, secodeOpt, suppressedMsgIDs, rmsgs...)
if err != nil {
qlog.Errorx("deleting queue messages from database after permanent failure", err)
} else if err := removeMsgsFS(qlog, rmsgs...); err != nil {
qlog.Errorx("remove queue messages from file system after permanent failure", err)
}
return
}
if m0.Attempts == 5 {
@ -95,7 +156,7 @@ func fail(ctx context.Context, qlog mlog.Log, msgs []*Msg, dialedIPs map[string]
for _, m := range msgs {
qmlog := qlog.With(slog.Int64("msgid", m.ID), slog.Any("recipient", m.Recipient()))
qmlog.Errorx("temporary failure delivering from queue, sending delayed dsn", err, slog.Duration("backoff", backoff))
deliverDSNDelay(ctx, qmlog, *m, remoteMTA, secodeOpt, errmsg, smtpLines, retryUntil)
deliverDSNDelay(qmlog, *m, remoteMTA, secodeOpt, errmsg, smtpLines, retryUntil)
}
} else {
for _, m := range msgs {
@ -106,9 +167,53 @@ func fail(ctx context.Context, qlog mlog.Log, msgs []*Msg, dialedIPs map[string]
slog.Time("nextattempt", m0.NextAttempt))
}
}
process := func() error {
// Update DialedIPs in message, and record the result.
qup := bstore.QueryTx[Msg](tx)
qup.FilterIDs(ids)
umsgs, err := qup.List()
if err != nil {
return fmt.Errorf("retrieving messages for marking temporary delivery error: %v", err)
}
for _, um := range umsgs {
// All messages should have the same DialedIPs.
um.DialedIPs = dialedIPs
um.markResult(code, secodeOpt, errmsg, false)
if err := tx.Update(&um); err != nil {
return fmt.Errorf("updating message after temporary failure to deliver: %v", err)
}
}
// If configured, we'll queue webhooks for delivery.
accConf, ok := mox.Conf.Account(m0.SenderAccount)
if !(ok && accConf.OutgoingWebhook != nil && (len(accConf.OutgoingWebhook.Events) == 0 || slices.Contains(accConf.OutgoingWebhook.Events, string(webhook.EventDelayed)))) {
return nil
}
hooks := make([]Hook, len(msgs))
for i, m := range msgs {
var err error
hooks[i], err = hookCompose(*m, accConf.OutgoingWebhook.URL, accConf.OutgoingWebhook.Authorization, webhook.EventDelayed, false, code, secodeOpt)
if err != nil {
return fmt.Errorf("composing webhook for failed delivery attempt for msg id %d: %v", m.ID, err)
}
}
now := time.Now()
for i := range hooks {
if err := hookInsert(tx, &hooks[i], now, accConf.KeepRetiredWebhookPeriod); err != nil {
return fmt.Errorf("inserting webhook into queue: %v", err)
}
qlog.Debug("queueing webhook for temporary delivery errors", hooks[i].attrs()...)
}
return nil
}
if err := process(); err != nil {
qlog.Errorx("processing temporary delivery error", err, slog.String("deliveryerror", errmsg))
}
}
func deliverDSNFailure(ctx context.Context, log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string) {
func deliverDSNFailure(log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string) {
const subject = "mail delivery failed"
message := fmt.Sprintf(`
Delivery has failed permanently for your email to:
@ -125,10 +230,10 @@ Error during the last delivery attempt:
message += "\nFull SMTP response:\n\n\t" + strings.Join(smtpLines, "\n\t") + "\n"
}
deliverDSN(ctx, log, m, remoteMTA, secodeOpt, errmsg, smtpLines, true, nil, subject, message)
deliverDSN(log, m, remoteMTA, secodeOpt, errmsg, smtpLines, true, nil, subject, message)
}
func deliverDSNDelay(ctx context.Context, log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string, retryUntil time.Time) {
func deliverDSNDelay(log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string, retryUntil time.Time) {
// Should not happen, but doesn't hurt to prevent sending delayed delivery
// notifications for DMARC reports. We don't want to waste postmaster attention.
if m.IsDMARCReport {
@ -152,14 +257,14 @@ Error during the last delivery attempt:
message += "\nFull SMTP response:\n\n\t" + strings.Join(smtpLines, "\n\t") + "\n"
}
deliverDSN(ctx, log, m, remoteMTA, secodeOpt, errmsg, smtpLines, false, &retryUntil, subject, message)
deliverDSN(log, m, remoteMTA, secodeOpt, errmsg, smtpLines, false, &retryUntil, subject, message)
}
// We only queue DSNs for delivery failures for emails submitted by authenticated
// users. So we are delivering to local users. ../rfc/5321:1466
// ../rfc/5321:1494
// ../rfc/7208:490
func deliverDSN(ctx context.Context, log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string, permanent bool, retryUntil *time.Time, subject, textBody string) {
func deliverDSN(log mlog.Log, m Msg, remoteMTA dsn.NameIP, secodeOpt, errmsg string, smtpLines []string, permanent bool, retryUntil *time.Time, subject, textBody string) {
kind := "delayed delivery"
if permanent {
kind = "failure"
@ -203,7 +308,7 @@ func deliverDSN(ctx context.Context, log mlog.Log, m Msg, remoteMTA dsn.NameIP,
// ../rfc/3461:1329
var smtpDiag string
if len(smtpLines) > 0 {
smtpDiag = "smtp; " + strings.Join(smtpLines, " ")
smtpDiag = strings.Join(smtpLines, " ")
}
dsnMsg := &dsn.Message{
@ -221,14 +326,14 @@ func deliverDSN(ctx context.Context, log mlog.Log, m Msg, remoteMTA dsn.NameIP,
Recipients: []dsn.Recipient{
{
FinalRecipient: m.Recipient(),
Action: action,
Status: status,
StatusComment: errmsg,
RemoteMTA: remoteMTA,
DiagnosticCode: smtpDiag,
LastAttemptDate: *m.LastAttempt,
WillRetryUntil: retryUntil,
FinalRecipient: m.Recipient(),
Action: action,
Status: status,
StatusComment: errmsg,
RemoteMTA: remoteMTA,
DiagnosticCodeSMTP: smtpDiag,
LastAttemptDate: *m.LastAttempt,
WillRetryUntil: retryUntil,
},
},

queue/hook.go: new file, 1240 lines (diff suppressed because it is too large)

queue/hook_test.go: new file, 688 lines

@ -0,0 +1,688 @@
package queue
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"slices"
"strings"
"testing"
"time"
"github.com/mjl-/bstore"
"github.com/mjl-/mox/dsn"
"github.com/mjl-/mox/message"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/store"
"github.com/mjl-/mox/webhook"
)
// Test webhooks for incoming message that is not related to outgoing deliveries.
func TestHookIncoming(t *testing.T) {
acc, cleanup := setup(t)
defer cleanup()
err := Init()
tcheck(t, err, "queue init")
accret, err := store.OpenAccount(pkglog, "retired")
tcheck(t, err, "open account for retired")
defer func() {
accret.Close()
accret.CheckClosed()
}()
testIncoming := func(a *store.Account, expIn bool) {
t.Helper()
_, err := bstore.QueryDB[Hook](ctxbg, DB).Delete()
tcheck(t, err, "clean up hooks")
mr := bytes.NewReader([]byte(testmsg))
now := time.Now().Round(0)
m := store.Message{
ID: 123,
RemoteIP: "::1",
MailFrom: "sender@remote.example",
MailFromLocalpart: "sender",
MailFromDomain: "remote.example",
RcptToLocalpart: "rcpt",
RcptToDomain: "mox.example",
MsgFromLocalpart: "mjl",
MsgFromDomain: "mox.example",
MsgFromOrgDomain: "mox.example",
EHLOValidated: true,
MailFromValidated: true,
MsgFromValidated: true,
EHLOValidation: store.ValidationPass,
MailFromValidation: store.ValidationPass,
MsgFromValidation: store.ValidationDMARC,
DKIMDomains: []string{"remote.example"},
Received: now,
Size: int64(len(testmsg)),
}
part, err := message.EnsurePart(pkglog.Logger, true, mr, int64(len(testmsg)))
tcheck(t, err, "parsing message")
err = Incoming(ctxbg, pkglog, a, "<random@localhost>", m, part, "Inbox")
tcheck(t, err, "pass incoming message")
hl, err := bstore.QueryDB[Hook](ctxbg, DB).List()
tcheck(t, err, "list hooks")
if !expIn {
tcompare(t, len(hl), 0)
return
}
tcompare(t, len(hl), 1)
h := hl[0]
tcompare(t, h.IsIncoming, true)
var in webhook.Incoming
dec := json.NewDecoder(strings.NewReader(h.Payload))
err = dec.Decode(&in)
tcheck(t, err, "decode incoming webhook")
expIncoming := webhook.Incoming{
From: []webhook.NameAddress{{Address: "mjl@mox.example"}},
To: []webhook.NameAddress{{Address: "mjl@mox.example"}},
CC: []webhook.NameAddress{},
BCC: []webhook.NameAddress{},
ReplyTo: []webhook.NameAddress{},
References: []string{},
Subject: "test",
Text: "test email\n",
Structure: webhook.PartStructure(&part),
Meta: webhook.IncomingMeta{
MsgID: m.ID,
MailFrom: m.MailFrom,
MailFromValidated: m.MailFromValidated,
MsgFromValidated: m.MsgFromValidated,
RcptTo: "rcpt@mox.example",
DKIMVerifiedDomains: []string{"remote.example"},
RemoteIP: "::1",
Received: m.Received,
MailboxName: "Inbox",
Automated: false,
},
}
tcompare(t, in, expIncoming)
}
testIncoming(acc, false)
testIncoming(accret, true)
}
// Test with fromid and various DSNs, and delivery.
func TestFromIDIncomingDelivery(t *testing.T) {
acc, cleanup := setup(t)
defer cleanup()
err := Init()
tcheck(t, err, "queue init")
accret, err := store.OpenAccount(pkglog, "retired")
tcheck(t, err, "open account for retired")
defer func() {
accret.Close()
accret.CheckClosed()
}()
// Account that only gets webhook calls, but no retired webhooks.
acchook, err := store.OpenAccount(pkglog, "hook")
tcheck(t, err, "open account for hook")
defer func() {
acchook.Close()
acchook.CheckClosed()
}()
addr, err := smtp.ParseAddress("mjl@mox.example")
tcheck(t, err, "parse address")
path := addr.Path()
now := time.Now().Round(0)
m := store.Message{
ID: 123,
RemoteIP: "::1",
MailFrom: "sender@remote.example",
MailFromLocalpart: "sender",
MailFromDomain: "remote.example",
RcptToLocalpart: "rcpt",
RcptToDomain: "mox.example",
MsgFromLocalpart: "mjl",
MsgFromDomain: "mox.example",
MsgFromOrgDomain: "mox.example",
EHLOValidated: true,
MailFromValidated: true,
MsgFromValidated: true,
EHLOValidation: store.ValidationPass,
MailFromValidation: store.ValidationPass,
MsgFromValidation: store.ValidationDMARC,
DKIMDomains: []string{"remote.example"},
Received: now,
DSN: true,
}
testIncoming := func(a *store.Account, rawmsg []byte, retiredFromID string, expIn bool, expOut *webhook.Outgoing) {
t.Helper()
_, err := bstore.QueryDB[Hook](ctxbg, DB).Delete()
tcheck(t, err, "clean up hooks")
_, err = bstore.QueryDB[MsgRetired](ctxbg, DB).Delete()
tcheck(t, err, "clean up retired messages")
qmr := MsgRetired{
SenderAccount: a.Name,
SenderLocalpart: "sender",
SenderDomainStr: "remote.example",
RecipientLocalpart: "rcpt",
RecipientDomain: path.IPDomain,
RecipientDomainStr: "mox.example",
RecipientAddress: "rcpt@mox.example",
Success: true,
KeepUntil: now.Add(time.Minute),
}
m.RcptToLocalpart = "mjl"
qmr.FromID = retiredFromID
m.Size = int64(len(rawmsg))
m.RcptToLocalpart += smtp.Localpart("+unique")
err = DB.Insert(ctxbg, &qmr)
tcheck(t, err, "insert retired message to match")
if expOut != nil {
expOut.QueueMsgID = qmr.ID
}
mr := bytes.NewReader(rawmsg)
part, err := message.EnsurePart(pkglog.Logger, true, mr, int64(len(rawmsg)))
tcheck(t, err, "parsing message")
err = Incoming(ctxbg, pkglog, a, "<random@localhost>", m, part, "Inbox")
tcheck(t, err, "pass incoming message")
hl, err := bstore.QueryDB[Hook](ctxbg, DB).List()
tcheck(t, err, "list hooks")
if !expIn && expOut == nil {
tcompare(t, len(hl), 0)
return
}
tcompare(t, len(hl), 1)
h := hl[0]
tcompare(t, h.IsIncoming, expIn)
if expIn {
return
}
var out webhook.Outgoing
dec := json.NewDecoder(strings.NewReader(h.Payload))
err = dec.Decode(&out)
tcheck(t, err, "decode outgoing webhook")
out.WebhookQueued = time.Time{}
tcompare(t, &out, expOut)
}
dsncompose := func(m *dsn.Message) []byte {
buf, err := m.Compose(pkglog, false)
tcheck(t, err, "compose dsn")
return buf
}
makedsn := func(action dsn.Action) *dsn.Message {
return &dsn.Message{
From: path,
To: path,
TextBody: "explanation",
MessageID: "<dsnmsgid@localhost>",
ReportingMTA: "localhost",
Recipients: []dsn.Recipient{
{
FinalRecipient: path,
Action: action,
Status: "5.0.0.",
DiagnosticCodeSMTP: "554 5.0.0 error",
},
},
}
}
msgfailed := dsncompose(makedsn(dsn.Failed))
// No FromID to match against, so we get a webhook for a new incoming message.
testIncoming(acc, msgfailed, "", false, nil)
testIncoming(accret, msgfailed, "mismatch", true, nil)
// DSN with multiple recipients are treated as unrecognized dsns.
multidsn := makedsn(dsn.Delivered)
multidsn.Recipients = append(multidsn.Recipients, multidsn.Recipients[0])
msgmultidsn := dsncompose(multidsn)
testIncoming(acc, msgmultidsn, "unique", false, nil)
testIncoming(accret, msgmultidsn, "unique", false, &webhook.Outgoing{
Event: webhook.EventUnrecognized,
DSN: true,
FromID: "unique",
})
msgdelayed := dsncompose(makedsn(dsn.Delayed))
testIncoming(acc, msgdelayed, "unique", false, nil)
testIncoming(accret, msgdelayed, "unique", false, &webhook.Outgoing{
Event: webhook.EventDelayed,
DSN: true,
FromID: "unique",
SMTPCode: 554,
SMTPEnhancedCode: "5.0.0",
})
msgrelayed := dsncompose(makedsn(dsn.Relayed))
testIncoming(acc, msgrelayed, "unique", false, nil)
testIncoming(accret, msgrelayed, "unique", false, &webhook.Outgoing{
Event: webhook.EventRelayed,
DSN: true,
FromID: "unique",
SMTPCode: 554,
SMTPEnhancedCode: "5.0.0",
})
msgunrecognized := dsncompose(makedsn(dsn.Action("bogus")))
testIncoming(acc, msgunrecognized, "unique", false, nil)
testIncoming(accret, msgunrecognized, "unique", false, &webhook.Outgoing{
Event: webhook.EventUnrecognized,
DSN: true,
FromID: "unique",
})
// Not a DSN but to fromid address also causes "unrecognized".
msgunrecognized2 := []byte(testmsg)
testIncoming(acc, msgunrecognized2, "unique", false, nil)
testIncoming(accret, msgunrecognized2, "unique", false, &webhook.Outgoing{
Event: webhook.EventUnrecognized,
DSN: false,
FromID: "unique",
})
msgdelivered := dsncompose(makedsn(dsn.Delivered))
testIncoming(acc, msgdelivered, "unique", false, nil)
testIncoming(accret, msgdelivered, "unique", false, &webhook.Outgoing{
Event: webhook.EventDelivered,
DSN: true,
FromID: "unique",
// This is what DSN claims.
SMTPCode: 554,
SMTPEnhancedCode: "5.0.0",
})
testIncoming(acc, msgfailed, "unique", false, nil)
testIncoming(accret, msgfailed, "unique", false, &webhook.Outgoing{
Event: webhook.EventFailed,
DSN: true,
FromID: "unique",
SMTPCode: 554,
SMTPEnhancedCode: "5.0.0",
})
// We still have a webhook in the queue from the test above.
// Try to get the hook delivered. We'll try various error handling cases and superseding.
qsize, err := HookQueueSize(ctxbg)
tcheck(t, err, "hook queue size")
tcompare(t, qsize, 1)
var handler http.HandlerFunc
handleError := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintln(w, "server error")
})
handleOK := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != "Basic dXNlcm5hbWU6cGFzc3dvcmQ=" {
http.Error(w, "unauthorized", http.StatusUnauthorized)
return
}
if r.Header.Get("X-Mox-Webhook-ID") == "" {
http.Error(w, "missing header x-mox-webhook-id", http.StatusBadRequest)
return
}
if r.Header.Get("X-Mox-Webhook-Attempt") == "" {
http.Error(w, "missing header x-mox-webhook-attempt", http.StatusBadRequest)
return
}
fmt.Fprintln(w, "ok")
})
hs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler.ServeHTTP(w, r)
}))
defer hs.Close()
h, err := bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get hook from queue")
next := hookNextWork(ctxbg, pkglog, map[string]struct{}{"https://other.example/": {}})
if next > 0 {
t.Fatalf("next scheduled work should be immediate, is %v", next)
}
// Respond with an error and see a retry is scheduled.
h.URL = hs.URL
// Update hook URL in database, so we can call hookLaunchWork. We'll call
// hookDeliver for later attempts.
err = DB.Update(ctxbg, &h)
tcheck(t, err, "update hook url")
handler = handleError
hookLaunchWork(pkglog, map[string]struct{}{"https://other.example/": {}})
<-hookDeliveryResults
err = DB.Get(ctxbg, &h)
tcheck(t, err, "get hook after failed delivery attempt")
tcompare(t, h.Attempts, 1)
tcompare(t, len(h.Results), 1)
tcompare(t, h.LastResult().Success, false)
tcompare(t, h.LastResult().Code, http.StatusInternalServerError)
tcompare(t, h.LastResult().Response, "server error\n")
next = hookNextWork(ctxbg, pkglog, map[string]struct{}{})
if next <= 0 {
t.Fatalf("next scheduled work is immediate, shoud be in the future")
}
n, err := HookNextAttemptSet(ctxbg, HookFilter{}, time.Now().Add(time.Minute))
tcheck(t, err, "schedule hook to now")
tcompare(t, n, 1)
n, err = HookNextAttemptAdd(ctxbg, HookFilter{}, -time.Minute)
tcheck(t, err, "schedule hook to now")
tcompare(t, n, 1)
next = hookNextWork(ctxbg, pkglog, map[string]struct{}{})
if next > 0 {
t.Fatalf("next scheduled work should be immediate, is %v", next)
}
handler = handleOK
hookDeliver(pkglog, h)
<-hookDeliveryResults
err = DB.Get(ctxbg, &h)
tcompare(t, err, bstore.ErrAbsent)
hr := HookRetired{ID: h.ID}
err = DB.Get(ctxbg, &hr)
tcheck(t, err, "get retired hook after delivery")
tcompare(t, hr.Attempts, 2)
tcompare(t, len(hr.Results), 2)
tcompare(t, hr.LastResult().Success, true)
tcompare(t, hr.LastResult().Code, http.StatusOK)
tcompare(t, hr.LastResult().Response, "ok\n")
// Check that cleaning up retired webhooks works.
cleanupHookRetiredSingle(pkglog)
hrl, err := bstore.QueryDB[HookRetired](ctxbg, DB).List()
tcheck(t, err, "listing retired hooks")
tcompare(t, len(hrl), 0)
// Helper to get a representative webhook added to the queue.
addHook := func(a *store.Account) {
testIncoming(a, msgfailed, "unique", false, &webhook.Outgoing{
Event: webhook.EventFailed,
DSN: true,
FromID: "unique",
SMTPCode: 554,
SMTPEnhancedCode: "5.0.0",
})
}
// Keep attempting and failing delivery until we give up.
addHook(accret)
h, err = bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get added hook")
h.URL = hs.URL
handler = handleError
for i := 0; i < len(hookIntervals); i++ {
hookDeliver(pkglog, h)
<-hookDeliveryResults
err := DB.Get(ctxbg, &h)
tcheck(t, err, "get hook")
tcompare(t, h.Attempts, i+1)
}
// Final attempt.
hookDeliver(pkglog, h)
<-hookDeliveryResults
err = DB.Get(ctxbg, &h)
tcompare(t, err, bstore.ErrAbsent)
hr = HookRetired{ID: h.ID}
err = DB.Get(ctxbg, &hr)
tcheck(t, err, "get retired hook after failure")
tcompare(t, hr.Attempts, len(hookIntervals)+1)
tcompare(t, len(hr.Results), len(hookIntervals)+1)
tcompare(t, hr.LastResult().Success, false)
tcompare(t, hr.LastResult().Code, http.StatusInternalServerError)
tcompare(t, hr.LastResult().Response, "server error\n")
// Check account "hook" doesn't get retired webhooks.
addHook(acchook)
h, err = bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get added hook")
handler = handleOK
h.URL = hs.URL
hookDeliver(pkglog, h)
<-hookDeliveryResults
err = DB.Get(ctxbg, &h)
tcompare(t, err, bstore.ErrAbsent)
hr = HookRetired{ID: h.ID}
err = DB.Get(ctxbg, &hr)
tcompare(t, err, bstore.ErrAbsent)
// HookCancel
addHook(accret)
h, err = bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get added hook")
n, err = HookCancel(ctxbg, pkglog, HookFilter{})
tcheck(t, err, "canceling hook")
tcompare(t, n, 1)
l, err := HookList(ctxbg, HookFilter{}, HookSort{})
tcheck(t, err, "list hook")
tcompare(t, len(l), 0)
// Superseding: When a webhook is scheduled for a message that already has a
// pending webhook, the previous webhook should be removed/retired.
_, err = bstore.QueryDB[HookRetired](ctxbg, DB).Delete()
tcheck(t, err, "clean up retired webhooks")
_, err = bstore.QueryDB[MsgRetired](ctxbg, DB).Delete()
tcheck(t, err, "clean up retired messages")
qmr := MsgRetired{
SenderAccount: accret.Name,
SenderLocalpart: "sender",
SenderDomainStr: "remote.example",
RecipientLocalpart: "rcpt",
RecipientDomain: path.IPDomain,
RecipientDomainStr: "mox.example",
RecipientAddress: "rcpt@mox.example",
Success: true,
KeepUntil: now.Add(time.Minute),
FromID: "unique",
}
err = DB.Insert(ctxbg, &qmr)
tcheck(t, err, "insert retired message to match")
m.RcptToLocalpart = "mjl"
m.Size = int64(len(msgdelayed))
m.RcptToLocalpart += smtp.Localpart("+unique")
mr := bytes.NewReader(msgdelayed)
part, err := message.EnsurePart(pkglog.Logger, true, mr, int64(len(msgdelayed)))
tcheck(t, err, "parsing message")
// Cause first webhook.
err = Incoming(ctxbg, pkglog, accret, "<random@localhost>", m, part, "Inbox")
tcheck(t, err, "pass incoming message")
h, err = bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get hook")
// Cause second webhook for same message. First should now be retired and marked as superseded.
err = Incoming(ctxbg, pkglog, accret, "<random@localhost>", m, part, "Inbox")
tcheck(t, err, "pass incoming message again")
h2, err := bstore.QueryDB[Hook](ctxbg, DB).Get()
tcheck(t, err, "get hook")
hr, err = bstore.QueryDB[HookRetired](ctxbg, DB).Get()
tcheck(t, err, "get retired hook")
tcompare(t, h.ID, hr.ID)
tcompare(t, hr.SupersededByID, h2.ID)
tcompare(t, h2.ID > h.ID, true)
}
func TestHookListFilterSort(t *testing.T) {
_, cleanup := setup(t)
defer cleanup()
err := Init()
tcheck(t, err, "queue init")
now := time.Now().Round(0)
h := Hook{0, 0, "fromid", "messageid", "subj", nil, "mjl", "http://localhost", "", false, "delivered", "", now, 0, now, []HookResult{}}
h1 := h
h1.Submitted = now.Add(-time.Second)
h1.NextAttempt = now.Add(time.Minute)
hl := []Hook{h, h, h, h, h, h1}
err = DB.Write(ctxbg, func(tx *bstore.Tx) error {
for i := range hl {
err := hookInsert(tx, &hl[i], now, time.Minute)
tcheck(t, err, "insert hook")
}
return nil
})
tcheck(t, err, "inserting hooks")
h1 = hl[len(hl)-1]
hlrev := slices.Clone(hl)
slices.Reverse(hlrev)
// Ascending by nextattempt,id.
l, err := HookList(ctxbg, HookFilter{}, HookSort{Asc: true})
tcheck(t, err, "list")
tcompare(t, l, hl)
// Descending by nextattempt,id.
l, err = HookList(ctxbg, HookFilter{}, HookSort{})
tcheck(t, err, "list")
tcompare(t, l, hlrev)
// Descending by submitted,id.
l, err = HookList(ctxbg, HookFilter{}, HookSort{Field: "Submitted"})
tcheck(t, err, "list")
ll := append(append([]Hook{}, hlrev[1:]...), hl[5])
tcompare(t, l, ll)
// Filter by all fields to get a single.
allfilters := HookFilter{
Max: 2,
IDs: []int64{h1.ID},
Account: "mjl",
Submitted: "<1s",
NextAttempt: ">1s",
Event: "delivered",
}
l, err = HookList(ctxbg, allfilters, HookSort{})
tcheck(t, err, "list single")
tcompare(t, l, []Hook{h1})
// Paginated NextAttmpt asc.
var lastID int64
var last any
l = nil
for {
nl, err := HookList(ctxbg, HookFilter{Max: 1}, HookSort{Asc: true, LastID: lastID, Last: last})
tcheck(t, err, "list paginated")
l = append(l, nl...)
if len(nl) == 0 {
break
}
tcompare(t, len(nl), 1)
lastID, last = nl[0].ID, nl[0].NextAttempt.Format(time.RFC3339Nano)
}
tcompare(t, l, hl)
// Paginated NextAttempt desc.
l = nil
lastID = 0
last = ""
for {
nl, err := HookList(ctxbg, HookFilter{Max: 1}, HookSort{LastID: lastID, Last: last})
tcheck(t, err, "list paginated")
l = append(l, nl...)
if len(nl) == 0 {
break
}
tcompare(t, len(nl), 1)
lastID, last = nl[0].ID, nl[0].NextAttempt.Format(time.RFC3339Nano)
}
tcompare(t, l, hlrev)
// Paginated Submitted desc.
l = nil
lastID = 0
last = ""
for {
nl, err := HookList(ctxbg, HookFilter{Max: 1}, HookSort{Field: "Submitted", LastID: lastID, Last: last})
tcheck(t, err, "list paginated")
l = append(l, nl...)
if len(nl) == 0 {
break
}
tcompare(t, len(nl), 1)
lastID, last = nl[0].ID, nl[0].Submitted.Format(time.RFC3339Nano)
}
tcompare(t, l, ll)
// Paginated Submitted asc.
l = nil
lastID = 0
last = ""
for {
nl, err := HookList(ctxbg, HookFilter{Max: 1}, HookSort{Field: "Submitted", Asc: true, LastID: lastID, Last: last})
tcheck(t, err, "list paginated")
l = append(l, nl...)
if len(nl) == 0 {
break
}
tcompare(t, len(nl), 1)
lastID, last = nl[0].ID, nl[0].Submitted.Format(time.RFC3339Nano)
}
llrev := slices.Clone(ll)
slices.Reverse(llrev)
tcompare(t, l, llrev)
// Retire messages and do similar but more basic tests. The code is similar.
var hrl []HookRetired
err = DB.Write(ctxbg, func(tx *bstore.Tx) error {
for _, h := range hl {
hr := h.Retired(false, h.NextAttempt, time.Now().Add(time.Minute).Round(0))
err := tx.Insert(&hr)
tcheck(t, err, "inserting retired")
hrl = append(hrl, hr)
}
return nil
})
tcheck(t, err, "adding retired")
// Paginated LastActivity desc.
var lr []HookRetired
lastID = 0
last = ""
l = nil
for {
nl, err := HookRetiredList(ctxbg, HookRetiredFilter{Max: 1}, HookRetiredSort{LastID: lastID, Last: last})
tcheck(t, err, "list paginated")
lr = append(lr, nl...)
if len(nl) == 0 {
break
}
tcompare(t, len(nl), 1)
lastID, last = nl[0].ID, nl[0].LastActivity.Format(time.RFC3339Nano)
}
hrlrev := slices.Clone(hrl)
slices.Reverse(hrlrev)
tcompare(t, lr, hrlrev)
// Filter by all fields to get a single.
allretiredfilters := HookRetiredFilter{
Max: 2,
IDs: []int64{hrlrev[0].ID},
Account: "mjl",
Submitted: "<1s",
LastActivity: ">1s",
Event: "delivered",
}
lr, err = HookRetiredList(ctxbg, allretiredfilters, HookRetiredSort{})
tcheck(t, err, "list single")
tcompare(t, lr, []HookRetired{hrlrev[0]})
}

(file diff suppressed because it is too large)

(file diff suppressed because it is too large)


@ -13,6 +13,8 @@ import (
"slices"
"time"
"github.com/mjl-/bstore"
"github.com/mjl-/mox/config"
"github.com/mjl-/mox/dns"
"github.com/mjl-/mox/dsn"
@ -22,6 +24,7 @@ import (
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/smtpclient"
"github.com/mjl-/mox/store"
"github.com/mjl-/mox/webhook"
)
// todo: reuse connection? do fewer concurrently (other than with direct delivery).
@ -91,7 +94,7 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
Secode: smtp.SePol7MissingReqTLS30,
Err: fmt.Errorf("transport %s: message requires verified tls but transport does not verify tls", transportName),
}
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
return
}
@ -126,7 +129,7 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
}
qlog.Errorx("dialing for submission", err, slog.String("remote", addr))
submiterr = fmt.Errorf("transport %s: dialing %s for submission: %w", transportName, addr, err)
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
return
}
dialcancel()
@ -183,7 +186,7 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
submiterr = smtperr
}
qlog.Errorx("establishing smtp session for submission", submiterr, slog.String("remote", addr))
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, remoteMTA, submiterr)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, remoteMTA, submiterr)
return
}
defer func() {
@ -208,7 +211,7 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
if err != nil {
qlog.Errorx("opening message for delivery", err, slog.String("remote", addr), slog.String("path", p))
submiterr = fmt.Errorf("transport %s: opening message file for submission: %w", transportName, err)
fail(ctx, qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
failMsgsDB(qlog, msgs, m0.DialedIPs, backoff, dsn.NameIP{}, submiterr)
return
}
msgr = store.FileMsgReader(m0.MsgPrefix, f)
@ -229,7 +232,7 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
qlog.Infox("smtp transaction for delivery failed", submiterr)
}
failed = 0 // Reset, we are looking at the SMTP results below.
var delIDs []int64
var delMsgs []Msg
for i, m := range msgs {
qmlog := qlog.With(
slog.Int64("msgid", m.ID),
@ -251,17 +254,24 @@ func deliverSubmit(qlog mlog.Log, resolver dns.Resolver, dialer smtpclient.Diale
err = smtperr
}
qmlog.Errorx("submitting message", err, slog.String("remote", addr))
fail(ctx, qmlog, []*Msg{m}, m0.DialedIPs, backoff, remoteMTA, err)
failMsgsDB(qmlog, []*Msg{m}, m0.DialedIPs, backoff, remoteMTA, err)
failed++
} else {
delIDs = append(delIDs, m.ID)
m.markResult(0, "", "", true)
delMsgs = append(delMsgs, *m)
qmlog.Info("delivered from queue with transport")
delivered++
}
}
if len(delIDs) > 0 {
if err := queueDelete(context.Background(), delIDs...); err != nil {
qlog.Errorx("deleting message from queue after delivery", err)
if len(delMsgs) > 0 {
err := DB.Write(context.Background(), func(tx *bstore.Tx) error {
return retireMsgs(qlog, tx, webhook.EventDelivered, 0, "", nil, delMsgs...)
})
if err != nil {
qlog.Errorx("remove queue message from database after delivery", err)
} else if err := removeMsgsFS(qlog, delMsgs...); err != nil {
qlog.Errorx("remove queue message from file system after delivery", err)
}
kick()
}
}

queue/suppression.go: new file, 170 lines

@ -0,0 +1,170 @@
package queue
import (
"context"
"errors"
"fmt"
"log/slog"
"strings"
"github.com/mjl-/bstore"
"github.com/mjl-/mox/mlog"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/webapi"
)
// todo: we should be processing spam complaints and add addresses to the list.
var errSuppressed = errors.New("address is on suppression list")
func baseAddress(a smtp.Path) smtp.Path {
s := string(a.Localpart)
s, _, _ = strings.Cut(s, "+")
s, _, _ = strings.Cut(s, "-")
s = strings.ReplaceAll(s, ".", "")
s = strings.ToLower(s)
return smtp.Path{Localpart: smtp.Localpart(s), IPDomain: a.IPDomain}
}
// SuppressionList returns suppression. If account is not empty, only suppression
// for that account are returned.
//
// SuppressionList does not check if an account exists.
func SuppressionList(ctx context.Context, account string) ([]webapi.Suppression, error) {
q := bstore.QueryDB[webapi.Suppression](ctx, DB)
if account != "" {
q.FilterNonzero(webapi.Suppression{Account: account})
}
return q.List()
}
// SuppressionLookup looks up a suppression for an address for an account. Returns
// a nil suppression if not found.
//
// SuppressionLookup does not check if an account exists.
func SuppressionLookup(ctx context.Context, account string, address smtp.Path) (*webapi.Suppression, error) {
baseAddr := baseAddress(address).XString(true)
q := bstore.QueryDB[webapi.Suppression](ctx, DB)
q.FilterNonzero(webapi.Suppression{Account: account, BaseAddress: baseAddr})
sup, err := q.Get()
if err == bstore.ErrAbsent {
return nil, nil
}
return &sup, err
}
// SuppressionAdd adds a suppression for an address for an account, setting
// BaseAddress based on OriginalAddress.
//
// If the base address of original address is already present, an error is
// returned (such as from bstore).
//
// SuppressionAdd does not check if an account exists.
func SuppressionAdd(ctx context.Context, originalAddress smtp.Path, sup *webapi.Suppression) error {
sup.BaseAddress = baseAddress(originalAddress).XString(true)
sup.OriginalAddress = originalAddress.XString(true)
return DB.Insert(ctx, sup)
}
// SuppressionRemove removes a suppression. The base address for the the given
// address is removed.
//
// SuppressionRemove does not check if an account exists.
func SuppressionRemove(ctx context.Context, account string, address smtp.Path) error {
baseAddr := baseAddress(address).XString(true)
q := bstore.QueryDB[webapi.Suppression](ctx, DB)
q.FilterNonzero(webapi.Suppression{Account: account, BaseAddress: baseAddr})
n, err := q.Delete()
if err != nil {
return err
}
if n == 0 {
return bstore.ErrAbsent
}
return nil
}
type suppressionCheck struct {
MsgID int64
Account string
Recipient smtp.Path
Code int
Secode string
Source string
}
// process failures, possibly creating suppressions.
func suppressionProcess(log mlog.Log, tx *bstore.Tx, scl ...suppressionCheck) (suppressedMsgIDs []int64, err error) {
for _, sc := range scl {
xlog := log.With(slog.Any("suppressioncheck", sc))
baseAddr := baseAddress(sc.Recipient).XString(true)
exists, err := bstore.QueryTx[webapi.Suppression](tx).FilterNonzero(webapi.Suppression{Account: sc.Account, BaseAddress: baseAddr}).Exists()
if err != nil {
return nil, fmt.Errorf("checking if address is in suppression list: %v", err)
} else if exists {
xlog.Debug("address already in suppression list")
continue
}
origAddr := sc.Recipient.XString(true)
sup := webapi.Suppression{
Account: sc.Account,
BaseAddress: baseAddr,
OriginalAddress: origAddr,
}
if isImmedateBlock(sc.Code, sc.Secode) {
sup.Reason = fmt.Sprintf("delivery failure from %s with smtp code %d, enhanced code %q", sc.Source, sc.Code, sc.Secode)
} else {
// If two most recent deliveries failed (excluding this one, so three most recent
// messages including this one), we'll add the address to the list.
q := bstore.QueryTx[MsgRetired](tx)
q.FilterNonzero(MsgRetired{RecipientAddress: origAddr})
q.FilterNotEqual("ID", sc.MsgID)
q.SortDesc("LastActivity")
q.Limit(2)
l, err := q.List()
if err != nil {
xlog.Errorx("checking for previous delivery failures", err)
continue
}
if len(l) < 2 || l[0].Success || l[1].Success {
continue
}
sup.Reason = fmt.Sprintf("delivery failure from %s and three consecutive failures", sc.Source)
}
if err := tx.Insert(&sup); err != nil {
return nil, fmt.Errorf("inserting suppression: %v", err)
}
suppressedMsgIDs = append(suppressedMsgIDs, sc.MsgID)
}
return suppressedMsgIDs, nil
}
// Decide whether an SMTP code and short enhanced code is a reason for an
// immediate suppression listing. For some errors, we don't want to bother the
// remote mail server again, or they may decide our behaviour looks spammy.
func isImmedateBlock(code int, secode string) bool {
switch code {
case smtp.C521HostNoMail, // Host is not interested in accepting email at all.
smtp.C550MailboxUnavail, // Likely mailbox does not exist.
smtp.C551UserNotLocal, // Also not interested in accepting email for this address.
smtp.C553BadMailbox, // We are sending a mailbox name that server doesn't understand and won't accept email for.
smtp.C556DomainNoMail: // Remote is not going to accept email for this address/domain.
return true
}
if code/100 != 5 {
return false
}
switch secode {
case smtp.SeAddr1UnknownDestMailbox1, // Recipient localpart doesn't exist.
smtp.SeAddr1UnknownSystem2, // Bad recipient domain.
smtp.SeAddr1MailboxSyntax3, // Remote doesn't understand syntax.
smtp.SeAddr1DestMailboxMoved6, // Address no longer exists.
smtp.SeMailbox2Disabled1, // Account exists at remote, but is disabled.
smtp.SePol7DeliveryUnauth1: // Seems popular for saying we are on a blocklist.
return true
}
return false
}

queue/suppression_test.go: new file, 107 lines

@ -0,0 +1,107 @@
package queue
import (
"testing"
"github.com/mjl-/mox/smtp"
"github.com/mjl-/mox/webapi"
)
func TestSuppression(t *testing.T) {
_, cleanup := setup(t)
defer cleanup()
err := Init()
tcheck(t, err, "queue init")
l, err := SuppressionList(ctxbg, "bogus")
tcheck(t, err, "listing suppressions for unknown account")
tcompare(t, len(l), 0)
l, err = SuppressionList(ctxbg, "") // All
tcheck(t, err, "list suppression for all accounts")
tcompare(t, len(l), 0) // None yet.
addr1, err := smtp.ParseAddress("mjl@mox.example")
tcheck(t, err, "parse address")
path1 := addr1.Path()
addr2, err := smtp.ParseAddress("mjl2@mox.example")
tcheck(t, err, "parse address")
path2 := addr2.Path()
addr2b, err := smtp.ParseAddress("M.j.l2+catchall@Mox.example")
tcheck(t, err, "parse address")
path2b := addr2b.Path()
// No suppression yet.
sup, err := SuppressionLookup(ctxbg, "mjl", path1)
tcheck(t, err, "lookup suppression")
tcompare(t, sup == nil, true)
// No error if account does not exist.
sup, err = SuppressionLookup(ctxbg, "bogus", path1)
tcompare(t, err == nil, true)
tcompare(t, sup == nil, true)
// Can add a suppression once.
err = SuppressionAdd(ctxbg, path1, &webapi.Suppression{Account: "mjl"})
tcheck(t, err, "add suppression")
// No duplicates.
err = SuppressionAdd(ctxbg, path1, &webapi.Suppression{Account: "mjl"})
tcompare(t, err == nil, false)
// Account must be set in Suppresion.
err = SuppressionAdd(ctxbg, path1, &webapi.Suppression{})
tcompare(t, err == nil, false)
// Duplicate check is done after making base address.
err = SuppressionAdd(ctxbg, path2, &webapi.Suppression{Account: "retired"})
tcheck(t, err, "add suppression")
err = SuppressionAdd(ctxbg, path2b, &webapi.Suppression{Account: "retired"})
tcompare(t, err == nil, false) // Duplicate.
l, err = SuppressionList(ctxbg, "") // All
tcheck(t, err, "list suppression for all accounts")
tcompare(t, len(l), 2)
l, err = SuppressionList(ctxbg, "mjl")
tcheck(t, err, "list suppression for mjl")
tcompare(t, len(l), 1)
// path1 is listed for mjl.
sup, err = SuppressionLookup(ctxbg, "mjl", path1)
tcheck(t, err, "lookup")
tcompare(t, sup == nil, false)
// Accounts don't influence each other.
sup, err = SuppressionLookup(ctxbg, "mjl", path2)
tcheck(t, err, "lookup")
tcompare(t, sup == nil, true)
// Simplified address is present.
sup, err = SuppressionLookup(ctxbg, "retired", path2)
tcheck(t, err, "lookup")
tcompare(t, sup == nil, false)
// Original address is also present.
sup, err = SuppressionLookup(ctxbg, "retired", path2b)
tcheck(t, err, "lookup")
tcompare(t, sup == nil, false)
// Can remove again.
err = SuppressionRemove(ctxbg, "mjl", path1)
tcheck(t, err, "remove")
// But not twice.
err = SuppressionRemove(ctxbg, "mjl", path1)
tcompare(t, err == nil, false)
// No longer present.
sup, err = SuppressionLookup(ctxbg, "mjl", path1)
tcheck(t, err, "lookup")
tcompare(t, sup == nil, true)
// Can remove for any form of the address, was added as path2b.
err = SuppressionRemove(ctxbg, "retired", path2b)
tcheck(t, err, "lookup")
// Account names are not validated.
err = SuppressionAdd(ctxbg, path1, &webapi.Suppression{Account: "bogus"})
tcheck(t, err, "add suppression")
err = SuppressionRemove(ctxbg, "bogus", path1)
tcheck(t, err, "remove suppression")
}