Refactor how messages are added to mailboxes

DeliverMessage() is now MessageAdd(), and it takes a Mailbox object that it
modifies but doesn't write to the database (the caller must do it, and plenty
of times can do it more efficiently by doing it once for multiple messages).
The new AddOpts let the caller influence how many checks and how much of the
work MessageAdd() does. The zero-value AddOpts enables all checks and all the
work, but callers can take responsibility for some of the checks/work if they
can do it more efficiently themselves.

This simplifies the code in most places, and makes it more efficient. The
checks to update per-mailbox keywords are a bit simpler now too.

We are also more careful to close the junk filter without saving it in case of
errors.

Still part of more upcoming changes.
This commit is contained in:
Mechiel Lukkien 2025-03-01 16:06:01 +01:00
parent 7855a32852
commit 2beb30cc20
No known key found for this signature in database
13 changed files with 410 additions and 362 deletions

2
ctl.go
View File

@ -1450,7 +1450,7 @@ func servectlcmd(ctx context.Context, ctl *ctl, cid int64, shutdown func()) {
if jf == nil { if jf == nil {
return return
} }
err := jf.Close() err := jf.CloseDiscard()
log.Check(err, "closing junk filter during cleanup") log.Check(err, "closing junk filter during cleanup")
}() }()

View File

@ -263,7 +263,6 @@ Accounts:
m := store.Message{ m := store.Message{
MailboxID: inbox.ID, MailboxID: inbox.ID,
MailboxOrigID: inbox.ID, MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "1.2.3.4", RemoteIP: "1.2.3.4",
RemoteIPMasked1: "1.2.3.4", RemoteIPMasked1: "1.2.3.4",
RemoteIPMasked2: "1.2.3.0", RemoteIPMasked2: "1.2.3.0",
@ -291,11 +290,10 @@ Accounts:
defer store.CloseRemoveTempFile(c.log, mf, "test message") defer store.CloseRemoveTempFile(c.log, mf, "test message")
_, err = fmt.Fprint(mf, msg) _, err = fmt.Fprint(mf, msg)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest1.DeliverMessage(c.log, tx, &m, mf, false, true, false, true)
err = tx.Get(&inbox) err = accTest1.MessageAdd(c.log, tx, &inbox, &m, mf, store.AddOpts{})
xcheckf(err, "get inbox") xcheckf(err, "deliver message")
inbox.Add(m.MailboxCounts())
err = tx.Update(&inbox) err = tx.Update(&inbox)
xcheckf(err, "update inbox") xcheckf(err, "update inbox")
@ -317,7 +315,6 @@ Accounts:
m0 := store.Message{ m0 := store.Message{
MailboxID: inbox.ID, MailboxID: inbox.ID,
MailboxOrigID: inbox.ID, MailboxOrigID: inbox.ID,
MailboxDestinedID: inbox.ID,
RemoteIP: "::1", RemoteIP: "::1",
RemoteIPMasked1: "::", RemoteIPMasked1: "::",
RemoteIPMasked2: "::", RemoteIPMasked2: "::",
@ -345,12 +342,8 @@ Accounts:
defer store.CloseRemoveTempFile(c.log, mf0, "test message") defer store.CloseRemoveTempFile(c.log, mf0, "test message")
_, err = fmt.Fprint(mf0, msg0) _, err = fmt.Fprint(mf0, msg0)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest2.DeliverMessage(c.log, tx, &m0, mf0, false, false, false, true) err = accTest2.MessageAdd(c.log, tx, &inbox, &m0, mf0, store.AddOpts{})
xcheckf(err, "add message to account test2") xcheckf(err, "add message to account test2")
err = tx.Get(&inbox)
xcheckf(err, "get inbox")
inbox.Add(m0.MailboxCounts())
err = tx.Update(&inbox) err = tx.Update(&inbox)
xcheckf(err, "update inbox") xcheckf(err, "update inbox")
@ -361,7 +354,6 @@ Accounts:
m1 := store.Message{ m1 := store.Message{
MailboxID: sent.ID, MailboxID: sent.ID,
MailboxOrigID: sent.ID, MailboxOrigID: sent.ID,
MailboxDestinedID: sent.ID,
Flags: store.Flags{Seen: true, Junk: true}, Flags: store.Flags{Seen: true, Junk: true},
Size: int64(len(prefix1) + len(msg1)), Size: int64(len(prefix1) + len(msg1)),
MsgPrefix: []byte(prefix1), MsgPrefix: []byte(prefix1),
@ -371,12 +363,8 @@ Accounts:
defer store.CloseRemoveTempFile(c.log, mf1, "test message") defer store.CloseRemoveTempFile(c.log, mf1, "test message")
_, err = fmt.Fprint(mf1, msg1) _, err = fmt.Fprint(mf1, msg1)
xcheckf(err, "writing deliver message to file") xcheckf(err, "writing deliver message to file")
err = accTest2.DeliverMessage(c.log, tx, &m1, mf1, false, false, false, true) err = accTest2.MessageAdd(c.log, tx, &sent, &m1, mf1, store.AddOpts{})
xcheckf(err, "add message to account test2") xcheckf(err, "add message to account test2")
err = tx.Get(&sent)
xcheckf(err, "get sent")
sent.Add(m1.MailboxCounts())
err = tx.Update(&sent) err = tx.Update(&sent)
xcheckf(err, "update sent") xcheckf(err, "update sent")

View File

@ -297,16 +297,10 @@ func (c *conn) cmdxReplace(isUID bool, tag, cmd string, p *parser) {
err = tx.Update(&mbSrc) err = tx.Update(&mbSrc)
xcheckf(err, "updating source mailbox counts") xcheckf(err, "updating source mailbox counts")
// The destination mailbox may be the same as source (currently selected), but
// doesn't have to be.
mbDst = c.xmailbox(tx, name, "TRYCREATE") mbDst = c.xmailbox(tx, name, "TRYCREATE")
mbDst.ModSeq = modseq
// Ensure keywords of message are present in destination mailbox. nkeywords := len(mbDst.Keywords)
var mbKwChanged bool
mbDst.Keywords, mbKwChanged = store.MergeKeywords(mbDst.Keywords, keywords)
if mbKwChanged {
changes = append(changes, mbDst.ChangeKeywords())
}
// Make new message to deliver. // Make new message to deliver.
nm = store.Message{ nm = store.Message{
@ -320,17 +314,21 @@ func (c *conn) cmdxReplace(isUID bool, tag, cmd string, p *parser) {
CreateSeq: modseq, CreateSeq: modseq,
} }
// Add counts about new message to mailbox. err = c.account.MessageAdd(c.log, tx, &mbDst, &nm, file, store.AddOpts{})
mbDst.Add(nm.MailboxCounts())
// Update mailbox before delivering, which updates uidnext which we mustn't overwrite.
mbDst.ModSeq = modseq
err = tx.Update(&mbDst)
xcheckf(err, "updating destination mailbox counts")
err = c.account.DeliverMessage(c.log, tx, &nm, file, true, false, false, true)
xcheckf(err, "delivering message") xcheckf(err, "delivering message")
changes = append(changes,
store.ChangeRemoveUIDs{MailboxID: om.MailboxID, UIDs: []store.UID{om.UID}, ModSeq: om.ModSeq},
nm.ChangeAddUID(),
mbDst.ChangeCounts(),
)
if nkeywords != len(mbDst.Keywords) {
changes = append(changes, mbDst.ChangeKeywords())
}
err = tx.Update(&mbDst)
xcheckf(err, "updating destination mailbox")
// Update path to what is stored in the account. We may still have to clean it up on errors. // Update path to what is stored in the account. We may still have to clean it up on errors.
newMsgPath = c.account.MessagePath(nm.ID) newMsgPath = c.account.MessagePath(nm.ID)
oldMsgPath = c.account.MessagePath(om.ID) oldMsgPath = c.account.MessagePath(om.ID)
@ -347,11 +345,6 @@ func (c *conn) cmdxReplace(isUID bool, tag, cmd string, p *parser) {
committed = true committed = true
// Broadcast the change to other connections. // Broadcast the change to other connections.
changes = append(changes,
store.ChangeRemoveUIDs{MailboxID: om.MailboxID, UIDs: []store.UID{om.UID}, ModSeq: om.ModSeq},
nm.ChangeAddUID(),
mbDst.ChangeCounts(),
)
if mbSrc.ID != mbDst.ID { if mbSrc.ID != mbDst.ID {
changes = append(changes, mbSrc.ChangeCounts()) changes = append(changes, mbSrc.ChangeCounts())
} }

View File

@ -2844,7 +2844,7 @@ func (c *conn) cmdCreate(tag, cmd string, p *parser) {
c.xdbwrite(func(tx *bstore.Tx) { c.xdbwrite(func(tx *bstore.Tx) {
var exists bool var exists bool
var err error var err error
changes, created, exists, err = c.account.MailboxCreate(tx, name, specialUse) _, changes, created, exists, err = c.account.MailboxCreate(tx, name, specialUse)
if exists { if exists {
// ../rfc/9051:1914 // ../rfc/9051:1914
xuserErrorf("mailbox already exists") xuserErrorf("mailbox already exists")
@ -3341,7 +3341,7 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
} }
var appends []*appendMsg var appends []*appendMsg
var committed bool var commit bool
defer func() { defer func() {
for _, a := range appends { for _, a := range appends {
if a.file != nil { if a.file != nil {
@ -3349,7 +3349,7 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
c.xsanity(err, "closing APPEND temporary file") c.xsanity(err, "closing APPEND temporary file")
} }
if !committed && a.path != "" { if !commit && a.path != "" {
err := os.Remove(a.path) err := os.Remove(a.path)
c.xsanity(err, "removing APPEND temporary file") c.xsanity(err, "removing APPEND temporary file")
} }
@ -3511,6 +3511,8 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
c.xdbwrite(func(tx *bstore.Tx) { c.xdbwrite(func(tx *bstore.Tx) {
mb = c.xmailbox(tx, name, "TRYCREATE") mb = c.xmailbox(tx, name, "TRYCREATE")
nkeywords := len(mb.Keywords)
// Check quota for all messages at once. // Check quota for all messages at once.
ok, maxSize, err := c.account.CanAddMessageSize(tx, totalSize) ok, maxSize, err := c.account.CanAddMessageSize(tx, totalSize)
xcheckf(err, "checking quota") xcheckf(err, "checking quota")
@ -3522,17 +3524,9 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
modseq, err := c.account.NextModSeq(tx) modseq, err := c.account.NextModSeq(tx)
xcheckf(err, "get next mod seq") xcheckf(err, "get next mod seq")
var mbKwChanged bool mb.ModSeq = modseq
for _, a := range appends {
// Ensure keywords are stored in mailbox.
var kwch bool
mb.Keywords, kwch = store.MergeKeywords(mb.Keywords, a.keywords)
mbKwChanged = mbKwChanged || kwch
}
if mbKwChanged {
changes = append(changes, mb.ChangeKeywords())
}
msgDirs := map[string]struct{}{}
for _, a := range appends { for _, a := range appends {
a.m = store.Message{ a.m = store.Message{
MailboxID: mb.ID, MailboxID: mb.ID,
@ -3544,34 +3538,39 @@ func (c *conn) cmdAppend(tag, cmd string, p *parser) {
ModSeq: modseq, ModSeq: modseq,
CreateSeq: modseq, CreateSeq: modseq,
} }
mb.Add(a.m.MailboxCounts())
}
// Update mailbox before delivering, which updates uidnext which we mustn't overwrite. // todo: do a single junk training
mb.ModSeq = modseq err = c.account.MessageAdd(c.log, tx, &mb, &a.m, a.file, store.AddOpts{SkipDirSync: true})
err = tx.Update(&mb)
xcheckf(err, "updating mailbox counts")
for _, a := range appends {
err = c.account.DeliverMessage(c.log, tx, &a.m, a.file, true, false, false, true)
xcheckf(err, "delivering message") xcheckf(err, "delivering message")
changes = append(changes, a.m.ChangeAddUID())
// Update path to what is stored in the account. We may still have to clean it up on errors. // Update path to what is stored in the account. We may still have to clean it up on errors.
a.path = c.account.MessagePath(a.m.ID) a.path = c.account.MessagePath(a.m.ID)
msgDirs[filepath.Dir(a.path)] = struct{}{}
}
changes = append(changes, mb.ChangeCounts())
if nkeywords != len(mb.Keywords) {
changes = append(changes, mb.ChangeKeywords())
}
err = tx.Update(&mb)
xcheckf(err, "updating mailbox counts")
for dir := range msgDirs {
err := moxio.SyncDir(c.log, dir)
xcheckf(err, "sync dir")
} }
}) })
// Success, make sure messages aren't cleaned up anymore. commit = true
committed = true
// Fetch pending changes, possibly with new UIDs, so we can apply them before adding our own new UID. // Fetch pending changes, possibly with new UIDs, so we can apply them before adding our own new UID.
pendingChanges = c.comm.Get() pendingChanges = c.comm.Get()
// Broadcast the change to other connections. // Broadcast the change to other connections.
for _, a := range appends {
changes = append(changes, a.m.ChangeAddUID())
}
changes = append(changes, mb.ChangeCounts())
c.broadcast(changes) c.broadcast(changes)
}) })
@ -4061,13 +4060,13 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
}() }()
var mbDst store.Mailbox var mbDst store.Mailbox
var nkeywords int
var origUIDs, newUIDs []store.UID var origUIDs, newUIDs []store.UID
var flags []store.Flags var flags []store.Flags
var keywords [][]string var keywords [][]string
var modseq store.ModSeq // For messages in new mailbox, assigned when first message is copied. var modseq store.ModSeq // For messages in new mailbox, assigned when first message is copied.
c.account.WithWLock(func() { c.account.WithWLock(func() {
var mbKwChanged bool
c.xdbwrite(func(tx *bstore.Tx) { c.xdbwrite(func(tx *bstore.Tx) {
mbSrc := c.xmailboxID(tx, c.mailboxID) // Validate. mbSrc := c.xmailboxID(tx, c.mailboxID) // Validate.
@ -4080,6 +4079,8 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
xuserErrorf("no matching messages to copy") xuserErrorf("no matching messages to copy")
} }
nkeywords = len(mbDst.Keywords)
var err error var err error
modseq, err = c.account.NextModSeq(tx) modseq, err = c.account.NextModSeq(tx)
xcheckf(err, "assigning next modseq") xcheckf(err, "assigning next modseq")
@ -4180,7 +4181,7 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
mbDst.Add(m.MailboxCounts()) mbDst.Add(m.MailboxCounts())
} }
mbDst.Keywords, mbKwChanged = store.MergeKeywords(mbDst.Keywords, maps.Keys(mbKeywords)) mbDst.Keywords, _ = store.MergeKeywords(mbDst.Keywords, maps.Keys(mbKeywords))
err = tx.Update(&mbDst) err = tx.Update(&mbDst)
xcheckf(err, "updating destination mailbox for uids, keywords and counts") xcheckf(err, "updating destination mailbox for uids, keywords and counts")
@ -4218,7 +4219,7 @@ func (c *conn) cmdxCopy(isUID bool, tag, cmd string, p *parser) {
changes = append(changes, store.ChangeAddUID{MailboxID: mbDst.ID, UID: uid, ModSeq: modseq, Flags: flags[i], Keywords: keywords[i]}) changes = append(changes, store.ChangeAddUID{MailboxID: mbDst.ID, UID: uid, ModSeq: modseq, Flags: flags[i], Keywords: keywords[i]})
} }
changes = append(changes, mbDst.ChangeCounts()) changes = append(changes, mbDst.ChangeCounts())
if mbKwChanged { if nkeywords != len(mbDst.Keywords) {
changes = append(changes, mbDst.ChangeKeywords()) changes = append(changes, mbDst.ChangeKeywords())
} }
c.broadcast(changes) c.broadcast(changes)
@ -4550,6 +4551,7 @@ func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) {
var err error var err error
modseq, err = c.account.NextModSeq(tx) modseq, err = c.account.NextModSeq(tx)
xcheckf(err, "next modseq") xcheckf(err, "next modseq")
mb.ModSeq = modseq
} }
m.ModSeq = modseq m.ModSeq = modseq
modified[m.ID] = true modified[m.ID] = true
@ -4562,10 +4564,10 @@ func (c *conn) cmdxStore(isUID bool, tag, cmd string, p *parser) {
xcheckf(err, "storing flags in messages") xcheckf(err, "storing flags in messages")
if mb.MailboxCounts != origmb.MailboxCounts || modseq != 0 { if mb.MailboxCounts != origmb.MailboxCounts || modseq != 0 {
mb.ModSeq = modseq
err := tx.Update(&mb) err := tx.Update(&mb)
xcheckf(err, "updating mailbox counts") xcheckf(err, "updating mailbox counts")
}
if mb.MailboxCounts != origmb.MailboxCounts {
changes = append(changes, mb.ChangeCounts()) changes = append(changes, mb.ChangeCounts())
} }
if mbKwChanged { if mbKwChanged {

View File

@ -16,12 +16,11 @@ import (
"strings" "strings"
"time" "time"
"golang.org/x/exp/maps"
"github.com/mjl-/mox/config" "github.com/mjl-/mox/config"
"github.com/mjl-/mox/message" "github.com/mjl-/mox/message"
"github.com/mjl-/mox/metrics" "github.com/mjl-/mox/metrics"
"github.com/mjl-/mox/mox-" "github.com/mjl-/mox/mox-"
"github.com/mjl-/mox/moxio"
"github.com/mjl-/mox/store" "github.com/mjl-/mox/store"
) )
@ -237,6 +236,11 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
msgreader = store.NewMaildirReader(ctl.log, store.CreateMessageTemp, mdnewf, mdcurf) msgreader = store.NewMaildirReader(ctl.log, store.CreateMessageTemp, mdnewf, mdcurf)
} }
// todo: one goroutine for reading messages, one for parsing the message, one adding to database, one for junk filter training.
n := 0
a.WithWLock(func() {
var changes []store.Change
tx, err := a.DB.Begin(ctx, true) tx, err := a.DB.Begin(ctx, true)
ctl.xcheck(err, "begin transaction") ctl.xcheck(err, "begin transaction")
defer func() { defer func() {
@ -251,7 +255,6 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
// We will be delivering messages. If we fail halfway, we need to remove the created msg files. // We will be delivering messages. If we fail halfway, we need to remove the created msg files.
var deliveredIDs []int64 var deliveredIDs []int64
defer func() { defer func() {
x := recover() x := recover()
if x == nil { if x == nil {
@ -271,38 +274,19 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
err := os.Remove(p) err := os.Remove(p)
ctl.log.Check(err, "closing message file after import error", slog.String("path", p)) ctl.log.Check(err, "closing message file after import error", slog.String("path", p))
} }
deliveredIDs = nil
ctl.xerror(fmt.Sprintf("import error: %v", x)) ctl.xerror(fmt.Sprintf("import error: %v", x))
}() }()
var changes []store.Change
var modseq store.ModSeq // Assigned on first delivered messages, used for all messages. var modseq store.ModSeq // Assigned on first delivered messages, used for all messages.
xdeliver := func(m *store.Message, mf *os.File) {
// todo: possibly set dmarcdomain to the domain of the from address? at least for non-spams that have been seen. otherwise user would start without any reputations. the assumption would be that the user has accepted email and deemed it legit, coming from the indicated sender.
const sync = false
const notrain = true
const nothreads = true
const updateDiskUsage = false
err := a.DeliverMessage(ctl.log, tx, m, mf, sync, notrain, nothreads, updateDiskUsage)
ctl.xcheck(err, "delivering message")
deliveredIDs = append(deliveredIDs, m.ID)
ctl.log.Debug("delivered message", slog.Int64("id", m.ID))
changes = append(changes, m.ChangeAddUID())
}
// todo: one goroutine for reading messages, one for parsing the message, one adding to database, one for junk filter training.
n := 0
a.WithWLock(func() {
// Ensure mailbox exists. // Ensure mailbox exists.
var mb store.Mailbox var mb store.Mailbox
mb, changes, err = a.MailboxEnsure(tx, mailbox, true, store.SpecialUse{}, &modseq) mb, changes, err = a.MailboxEnsure(tx, mailbox, true, store.SpecialUse{}, &modseq)
ctl.xcheck(err, "ensuring mailbox exists") ctl.xcheck(err, "ensuring mailbox exists")
// We ensure keywords in messages make it to the mailbox as well. nkeywords := len(mb.Keywords)
mailboxKeywords := map[string]bool{}
jf, _, err := a.OpenJunkFilter(ctx, ctl.log) jf, _, err := a.OpenJunkFilter(ctx, ctl.log)
if err != nil && !errors.Is(err, store.ErrNoJunkFilter) { if err != nil && !errors.Is(err, store.ErrNoJunkFilter) {
@ -310,7 +294,7 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
} }
defer func() { defer func() {
if jf != nil { if jf != nil {
err = jf.Close() err = jf.CloseDiscard()
ctl.xcheck(err, "close junk filter") ctl.xcheck(err, "close junk filter")
} }
}() }()
@ -323,6 +307,8 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
err = tx.Get(&du) err = tx.Get(&du)
ctl.xcheck(err, "get disk usage") ctl.xcheck(err, "get disk usage")
msgDirs := map[string]struct{}{}
process := func(m *store.Message, msgf *os.File, origPath string) { process := func(m *store.Message, msgf *os.File, origPath string) {
defer store.CloseRemoveTempFile(ctl.log, msgf, "message to import") defer store.CloseRemoveTempFile(ctl.log, msgf, "message to import")
@ -331,11 +317,6 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
ctl.xcheck(fmt.Errorf("account over maximum total message size %d", maxSize), "checking quota") ctl.xcheck(fmt.Errorf("account over maximum total message size %d", maxSize), "checking quota")
} }
for _, kw := range m.Keywords {
mailboxKeywords[kw] = true
}
mb.Add(m.MailboxCounts())
// Parse message and store parsed information for later fast retrieval. // Parse message and store parsed information for later fast retrieval.
p, err := message.EnsurePart(ctl.log.Logger, false, msgf, m.Size) p, err := message.EnsurePart(ctl.log.Logger, false, msgf, m.Size)
if err != nil { if err != nil {
@ -344,7 +325,7 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
m.ParsedBuf, err = json.Marshal(p) m.ParsedBuf, err = json.Marshal(p)
ctl.xcheck(err, "marshal parsed message structure") ctl.xcheck(err, "marshal parsed message structure")
// Set fields needed for future threading. By doing it now, DeliverMessage won't // Set fields needed for future threading. By doing it now, MessageAdd won't
// have to parse the Part again. // have to parse the Part again.
p.SetReaderAt(store.FileMsgReader(m.MsgPrefix, msgf)) p.SetReaderAt(store.FileMsgReader(m.MsgPrefix, msgf))
m.PrepareThreading(ctl.log, &p) m.PrepareThreading(ctl.log, &p)
@ -357,9 +338,6 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
} }
} }
// We set the flags that Deliver would set now and train ourselves. This prevents
// Deliver from training, which would open the junk filter, change it, and write it
// back to disk, for each message (slow).
m.JunkFlagsForMailbox(mb, conf) m.JunkFlagsForMailbox(mb, conf)
if jf != nil && m.NeedsTraining() { if jf != nil && m.NeedsTraining() {
if words, err := jf.ParseMessage(p); err != nil { if words, err := jf.ParseMessage(p); err != nil {
@ -375,13 +353,28 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
var err error var err error
modseq, err = a.NextModSeq(tx) modseq, err = a.NextModSeq(tx)
ctl.xcheck(err, "assigning next modseq") ctl.xcheck(err, "assigning next modseq")
mb.ModSeq = modseq
} }
m.MailboxID = mb.ID m.MailboxID = mb.ID
m.MailboxOrigID = mb.ID m.MailboxOrigID = mb.ID
m.CreateSeq = modseq m.CreateSeq = modseq
m.ModSeq = modseq m.ModSeq = modseq
xdeliver(m, msgf)
// todo: possibly set dmarcdomain to the domain of the from address? at least for non-spams that have been seen. otherwise user would start without any reputations. the assumption would be that the user has accepted email and deemed it legit, coming from the indicated sender.
opts := store.AddOpts{
SkipDirSync: true,
SkipTraining: true,
SkipThreads: true, // We do this efficiently when we have all messages.
SkipUpdateDiskUsage: true, // We do this once at the end.
SkipCheckQuota: true, // We check before.
}
err = a.MessageAdd(ctl.log, tx, &mb, m, msgf, opts)
ctl.xcheck(err, "delivering message")
deliveredIDs = append(deliveredIDs, m.ID)
changes = append(changes, m.ChangeAddUID())
msgDirs[filepath.Dir(a.MessagePath(m.ID))] = struct{}{}
n++ n++
if n%1000 == 0 { if n%1000 == 0 {
@ -405,25 +398,27 @@ func importctl(ctx context.Context, ctl *ctl, mbox bool) {
ctl.xcheck(err, "assigning messages to threads") ctl.xcheck(err, "assigning messages to threads")
} }
// Get mailbox again, uidnext is likely updated. changes = append(changes, mb.ChangeCounts())
mc := mb.MailboxCounts if nkeywords != len(mb.Keywords) {
err = tx.Get(&mb)
ctl.xcheck(err, "get mailbox")
mb.MailboxCounts = mc
// If there are any new keywords, update the mailbox.
var mbKwChanged bool
mb.Keywords, mbKwChanged = store.MergeKeywords(mb.Keywords, maps.Keys(mailboxKeywords))
if mbKwChanged {
changes = append(changes, mb.ChangeKeywords()) changes = append(changes, mb.ChangeKeywords())
} }
err = tx.Update(&mb) err = tx.Update(&mb)
ctl.xcheck(err, "updating message counts and keywords in mailbox") ctl.xcheck(err, "updating message counts and keywords in mailbox")
changes = append(changes, mb.ChangeCounts())
err = a.AddMessageSize(ctl.log, tx, addSize) err = a.AddMessageSize(ctl.log, tx, addSize)
xcheckf(err, "updating total message size") ctl.xcheck(err, "updating total message size")
for msgDir := range msgDirs {
err := moxio.SyncDir(ctl.log, msgDir)
ctl.xcheck(err, "sync dir")
}
if jf != nil {
err := jf.Close()
ctl.log.Check(err, "close junk filter")
jf = nil
}
err = tx.Commit() err = tx.Commit()
ctl.xcheck(err, "commit") ctl.xcheck(err, "commit")

View File

@ -3413,15 +3413,45 @@ func (c *conn) deliver(ctx context.Context, recvHdrFor func(string) string, msgW
} }
if err != nil { if err != nil {
log.Errorx("tidying rejects mailbox", err) log.Errorx("tidying rejects mailbox", err)
} else if hasSpace { } else if !hasSpace {
if err := a.d.acc.DeliverMailbox(log, conf.RejectsMailbox, a.d.m, dataFile); err != nil {
log.Errorx("delivering spammy mail to rejects mailbox", err)
} else {
log.Info("delivered spammy mail to rejects mailbox")
}
} else {
log.Info("not storing spammy mail to full rejects mailbox") log.Info("not storing spammy mail to full rejects mailbox")
return
} }
var changes []store.Change
var stored bool
err = a.d.acc.DB.Write(context.TODO(), func(tx *bstore.Tx) error {
mbrej, err := a.d.acc.MailboxFind(tx, conf.RejectsMailbox)
if err != nil {
return fmt.Errorf("finding rejects mailbox: %v", err)
}
if mbrej == nil {
nmb, chl, _, _, err := a.d.acc.MailboxCreate(tx, conf.RejectsMailbox, store.SpecialUse{})
if err != nil {
return fmt.Errorf("creating rejects mailbox: %v", err)
}
changes = append(changes, chl...)
mbrej = &nmb
}
a.d.m.MailboxID = mbrej.ID
if err := a.d.acc.MessageAdd(log, tx, mbrej, a.d.m, dataFile, store.AddOpts{}); err != nil {
return fmt.Errorf("delivering spammy mail to rejects mailbox: %v", err)
}
if err := tx.Update(mbrej); err != nil {
return fmt.Errorf("updating rejects mailbox: %v", err)
}
changes = append(changes, a.d.m.ChangeAddUID(), mbrej.ChangeCounts())
stored = true
return nil
})
if err != nil {
log.Errorx("delivering to rejects mailbox", err)
return
} else if stored {
log.Info("stored spammy mail in rejects mailbox")
}
store.BroadcastChanges(a.d.acc, changes)
}) })
} }

View File

@ -883,8 +883,16 @@ type Account struct {
// directory when delivering. // directory when delivering.
lastMsgDir string lastMsgDir string
// Write lock must be held for account/mailbox modifications including message delivery. // Write lock must be held when modifying account/mailbox/message/flags/annotations
// Read lock for reading mailboxes/messages. // if the change needs to be synchronized with client connections by broadcasting
// the changes. Changes that are not protocol-visible do not require a lock, the
// database transactions isolate activity, though locking may be necessary to
// protect in-memory-only access.
//
// Read lock for reading mailboxes/messages as a consistent snapshot (i.e. not
// concurrent changes). For longer transactions, e.g. when reading many messages,
// the lock can be released while continuing to read from the transaction.
//
// When making changes to mailboxes/messages, changes must be broadcasted before // When making changes to mailboxes/messages, changes must be broadcasted before
// releasing the lock to ensure proper UID ordering. // releasing the lock to ensure proper UID ordering.
sync.RWMutex sync.RWMutex
@ -1641,69 +1649,116 @@ func (a *Account) WithRLock(fn func()) {
fn() fn()
} }
// DeliverMessage delivers a mail message to the account. // AddOpts influence which work MessageAdd does. Some callers can batch
// checks/operations efficiently. For convenience and safety, a zero AddOpts does
// all the checks and work.
type AddOpts struct {
SkipCheckQuota bool
// If set, the message size is not added to the disk usage. Caller must do that,
// e.g. for many messages at once. If used together with SkipCheckQuota, the
DiskUsage is not read from the database when adding a message.
SkipUpdateDiskUsage bool
// Do not fsync the delivered message file. Useful when copying message files from
// another mailbox. The hardlink created during delivery only needs a directory
// fsync.
SkipSourceFileSync bool
// The directory in which the message file is delivered, typically with a hard
// link, is not fsynced. Useful when delivering many files. A single or few
// directory fsyncs are more efficient.
SkipDirSync bool
// Do not assign thread information to a message. Useful when importing many
// messages and assigning threads efficiently after importing messages.
SkipThreads bool
// If JunkFilter is set, it is used for training. If not set, and the filter must
// be trained for a message, the junk filter is opened, modified and saved to disk.
JunkFilter *junk.Filter
SkipTraining bool
}
// MessageAdd delivers a mail message to the account.
// //
// The message, with msg.MsgPrefix and msgFile combined, must have a header // The message, with msg.MsgPrefix and msgFile combined, must have a header
// section. The caller is responsible for adding a header separator to // section. The caller is responsible for adding a header separator to
// msg.MsgPrefix if missing from an incoming message. // msg.MsgPrefix if missing from an incoming message.
// //
// If UID is not set, it is assigned automatically.
//
// If the message ModSeq is zero, it is assigned automatically. If the message
// CreateSeq is zero, it is set to ModSeq. The mailbox ModSeq is set to the message
// ModSeq.
//
// If the message does not fit in the quota, an error with ErrOverQuota is returned
// and the mailbox and message are unchanged and the transaction can continue. For
// other errors, the caller must abort the transaction.
//
// If the destination mailbox has the Sent special-use flag, the message is parsed // If the destination mailbox has the Sent special-use flag, the message is parsed
// for its recipients (to/cc/bcc). Their domains are added to Recipients for use in // for its recipients (to/cc/bcc). Their domains are added to Recipients for use in
// reputation classification. // reputation classification.
// //
// If sync is true, the message file and its directory will be synced. Should be // Must be called with account write lock held.
// true for regular mail delivery, but can be false when importing many messages.
// //
// If updateDiskUsage is true, the account total message size (for quota) is // Caller must save the mailbox after MessageAdd returns, and broadcast changes for
// updated. Callers must check if a message can be added within quota before // new the message, updated mailbox counts and possibly new mailbox keywords.
// calling DeliverMessage. func (a *Account) MessageAdd(log mlog.Log, tx *bstore.Tx, mb *Mailbox, m *Message, msgFile *os.File, opts AddOpts) (rerr error) {
//
// If CreateSeq/ModSeq is not set, it is assigned automatically.
//
// Must be called with account rlock or wlock.
//
// Caller must broadcast new message.
//
// Caller must update mailbox counts.
func (a *Account) DeliverMessage(log mlog.Log, tx *bstore.Tx, m *Message, msgFile *os.File, sync, notrain, nothreads, updateDiskUsage bool) (rerr error) {
if m.Expunged { if m.Expunged {
return fmt.Errorf("cannot deliver expunged message") return fmt.Errorf("cannot deliver expunged message")
} }
mb := Mailbox{ID: m.MailboxID} if !opts.SkipUpdateDiskUsage || !opts.SkipCheckQuota {
if err := tx.Get(&mb); err != nil {
return fmt.Errorf("get mailbox: %w", err)
}
m.UID = mb.UIDNext
mb.UIDNext++
if m.CreateSeq == 0 || m.ModSeq == 0 {
modseq, err := a.NextModSeq(tx)
if err != nil {
return fmt.Errorf("assigning next modseq: %w", err)
}
m.CreateSeq = modseq
m.ModSeq = modseq
} else if m.ModSeq < mb.ModSeq {
return fmt.Errorf("cannot deliver message with modseq %d < mailbox modseq %d", m.ModSeq, mb.ModSeq)
}
mb.ModSeq = m.ModSeq
if err := tx.Update(&mb); err != nil {
return fmt.Errorf("updating mailbox nextuid: %w", err)
}
if updateDiskUsage {
du := DiskUsage{ID: 1} du := DiskUsage{ID: 1}
if err := tx.Get(&du); err != nil { if err := tx.Get(&du); err != nil {
return fmt.Errorf("get disk usage: %v", err) return fmt.Errorf("get disk usage: %v", err)
} }
if !opts.SkipCheckQuota {
maxSize := a.QuotaMessageSize()
if maxSize > 0 && m.Size > maxSize-du.MessageSize {
return fmt.Errorf("%w: max size %d bytes", ErrOverQuota, maxSize)
}
}
if !opts.SkipUpdateDiskUsage {
du.MessageSize += m.Size du.MessageSize += m.Size
if err := tx.Update(&du); err != nil { if err := tx.Update(&du); err != nil {
return fmt.Errorf("update disk usage: %v", err) return fmt.Errorf("update disk usage: %v", err)
} }
} }
}
m.MailboxID = mb.ID
if m.MailboxOrigID == 0 {
m.MailboxOrigID = mb.ID
}
if m.UID == 0 {
m.UID = mb.UIDNext
mb.UIDNext++
}
if m.ModSeq == 0 {
modseq, err := a.NextModSeq(tx)
if err != nil {
return fmt.Errorf("assigning next modseq: %w", err)
}
m.ModSeq = modseq
} else if m.ModSeq < mb.ModSeq {
return fmt.Errorf("cannot deliver message with modseq %d < mailbox modseq %d", m.ModSeq, mb.ModSeq)
}
if m.CreateSeq == 0 {
m.CreateSeq = m.ModSeq
}
mb.ModSeq = m.ModSeq
if len(m.Keywords) > 0 {
mb.Keywords, _ = MergeKeywords(mb.Keywords, m.Keywords)
}
conf, _ := a.Conf() conf, _ := a.Conf()
m.JunkFlagsForMailbox(mb, conf) m.JunkFlagsForMailbox(*mb, conf)
var part *message.Part var part *message.Part
if m.ParsedBuf == nil { if m.ParsedBuf == nil {
@ -1749,8 +1804,8 @@ func (a *Account) DeliverMessage(log mlog.Log, tx *bstore.Tx, m *Message, msgFil
} }
// Assign to thread (if upgrade has completed). // Assign to thread (if upgrade has completed).
noThreadID := nothreads noThreadID := opts.SkipThreads
if m.ThreadID == 0 && !nothreads && getPart() != nil { if m.ThreadID == 0 && !opts.SkipThreads && getPart() != nil {
select { select {
case <-a.threadsCompleted: case <-a.threadsCompleted:
if a.threadsErr != nil { if a.threadsErr != nil {
@ -1831,7 +1886,7 @@ func (a *Account) DeliverMessage(log mlog.Log, tx *bstore.Tx, m *Message, msgFil
} }
// Sync file data to disk. // Sync file data to disk.
if sync { if !opts.SkipSourceFileSync {
if err := msgFile.Sync(); err != nil { if err := msgFile.Sync(); err != nil {
return fmt.Errorf("fsync message file: %w", err) return fmt.Errorf("fsync message file: %w", err)
} }
@ -1848,19 +1903,40 @@ func (a *Account) DeliverMessage(log mlog.Log, tx *bstore.Tx, m *Message, msgFil
} }
}() }()
if sync { if !opts.SkipDirSync {
if err := moxio.SyncDir(log, msgDir); err != nil { if err := moxio.SyncDir(log, msgDir); err != nil {
return fmt.Errorf("sync directory: %w", err) return fmt.Errorf("sync directory: %w", err)
} }
} }
if !notrain && m.NeedsTraining() { if !opts.SkipTraining && m.NeedsTraining() && a.HasJunkFilter() {
l := []Message{*m} jf, opened, err := a.ensureJunkFilter(context.TODO(), log, opts.JunkFilter)
if err := a.RetrainMessages(context.TODO(), log, tx, l); err != nil { if err != nil {
return fmt.Errorf("open junk filter: %v", err)
}
defer func() {
if jf != nil && opened {
err := jf.CloseDiscard()
log.Check(err, "closing junk filter without saving")
}
}()
// todo optimize: should let us do the tx.Update of m if needed. we should at least merge it with the common case of setting a thread id. and we should try to merge that with the insert by expliciting getting the next id from bstore.
if err := a.RetrainMessage(context.TODO(), log, tx, jf, m); err != nil {
return fmt.Errorf("training junkfilter: %w", err) return fmt.Errorf("training junkfilter: %w", err)
} }
*m = l[0]
if opened {
err := jf.Close()
jf = nil
if err != nil {
return fmt.Errorf("close junk filter: %w", err)
} }
}
}
mb.MailboxCounts.Add(m.MailboxCounts())
return nil return nil
} }
@ -2277,37 +2353,29 @@ func (a *Account) DeliverMailbox(log mlog.Log, mailbox string, m *Message, msgFi
}() }()
err := a.DB.Write(context.TODO(), func(tx *bstore.Tx) error { err := a.DB.Write(context.TODO(), func(tx *bstore.Tx) error {
if ok, _, err := a.CanAddMessageSize(tx, m.Size); err != nil { mb, chl, err := a.MailboxEnsure(tx, mailbox, true, SpecialUse{}, &m.ModSeq)
return err
} else if !ok {
return ErrOverQuota
}
modseq := m.ModSeq
mb, chl, err := a.MailboxEnsure(tx, mailbox, true, SpecialUse{}, &modseq)
if err != nil { if err != nil {
return fmt.Errorf("ensuring mailbox: %w", err) return fmt.Errorf("ensuring mailbox: %w", err)
} }
m.MailboxID = mb.ID if m.CreateSeq == 0 {
m.MailboxOrigID = mb.ID m.CreateSeq = m.ModSeq
if m.ModSeq == 0 && modseq != 0 { }
m.ModSeq = modseq
m.CreateSeq = modseq nmbkeywords := len(mb.Keywords)
if err := a.MessageAdd(log, tx, &mb, m, msgFile, AddOpts{}); err != nil {
return err
} }
// Update count early, DeliverMessage will update mb too and we don't want to fetch
// it again before updating.
mb.MailboxCounts.Add(m.MailboxCounts())
if err := tx.Update(&mb); err != nil { if err := tx.Update(&mb); err != nil {
return fmt.Errorf("updating mailbox for delivery: %w", err) return fmt.Errorf("updating mailbox for delivery: %w", err)
} }
if err := a.DeliverMessage(log, tx, m, msgFile, true, false, false, true); err != nil {
return err
}
changes = append(changes, chl...) changes = append(changes, chl...)
changes = append(changes, m.ChangeAddUID(), mb.ChangeCounts()) changes = append(changes, m.ChangeAddUID(), mb.ChangeCounts())
if nmbkeywords != len(mb.Keywords) {
changes = append(changes, mb.ChangeKeywords())
}
return nil return nil
}) })
if err != nil { if err != nil {
@ -2925,7 +2993,7 @@ func (a *Account) SendLimitReached(tx *bstore.Tx, recipients []smtp.Path) (msgli
// other mailboxes if they have them, reflected in the returned changes. // other mailboxes if they have them, reflected in the returned changes.
// //
// Name must be in normalized form. // Name must be in normalized form.
func (a *Account) MailboxCreate(tx *bstore.Tx, name string, specialUse SpecialUse) (changes []Change, created []string, exists bool, rerr error) { func (a *Account) MailboxCreate(tx *bstore.Tx, name string, specialUse SpecialUse) (nmb Mailbox, changes []Change, created []string, exists bool, rerr error) {
elems := strings.Split(name, "/") elems := strings.Split(name, "/")
var p string var p string
var modseq ModSeq var modseq ModSeq
@ -2936,22 +3004,23 @@ func (a *Account) MailboxCreate(tx *bstore.Tx, name string, specialUse SpecialUs
p += elem p += elem
exists, err := a.MailboxExists(tx, p) exists, err := a.MailboxExists(tx, p)
if err != nil { if err != nil {
return nil, nil, false, fmt.Errorf("checking if mailbox exists") return Mailbox{}, nil, nil, false, fmt.Errorf("checking if mailbox exists")
} }
if exists { if exists {
if i == len(elems)-1 { if i == len(elems)-1 {
return nil, nil, true, fmt.Errorf("mailbox already exists") return Mailbox{}, nil, nil, true, fmt.Errorf("mailbox already exists")
} }
continue continue
} }
_, nchanges, err := a.MailboxEnsure(tx, p, true, specialUse, &modseq) mb, nchanges, err := a.MailboxEnsure(tx, p, true, specialUse, &modseq)
if err != nil { if err != nil {
return nil, nil, false, fmt.Errorf("ensuring mailbox exists: %v", err) return Mailbox{}, nil, nil, false, fmt.Errorf("ensuring mailbox exists: %v", err)
} }
nmb = mb
changes = append(changes, nchanges...) changes = append(changes, nchanges...)
created = append(created, p) created = append(created, p)
} }
return changes, created, false, nil return nmb, changes, created, false, nil
} }
// MailboxRename renames mailbox mbsrc to dst, and any missing parents for the // MailboxRename renames mailbox mbsrc to dst, and any missing parents for the

View File

@ -89,15 +89,11 @@ func TestMailbox(t *testing.T) {
tcheck(t, err, "sent mailbox") tcheck(t, err, "sent mailbox")
msent.MailboxID = mbsent.ID msent.MailboxID = mbsent.ID
msent.MailboxOrigID = mbsent.ID msent.MailboxOrigID = mbsent.ID
err = acc.DeliverMessage(pkglog, tx, &msent, msgFile, true, false, false, true) err = acc.MessageAdd(pkglog, tx, &mbsent, &msent, msgFile, AddOpts{SkipSourceFileSync: true, SkipDirSync: true})
tcheck(t, err, "deliver message") tcheck(t, err, "deliver message")
if !msent.ThreadMuted || !msent.ThreadCollapsed { if !msent.ThreadMuted || !msent.ThreadCollapsed {
t.Fatalf("thread muted & collapsed should have been copied from parent (duplicate message-id) m") t.Fatalf("thread muted & collapsed should have been copied from parent (duplicate message-id) m")
} }
err = tx.Get(&mbsent)
tcheck(t, err, "get mbsent")
mbsent.Add(msent.MailboxCounts())
err = tx.Update(&mbsent) err = tx.Update(&mbsent)
tcheck(t, err, "update mbsent") tcheck(t, err, "update mbsent")
@ -108,12 +104,8 @@ func TestMailbox(t *testing.T) {
tcheck(t, err, "insert rejects mailbox") tcheck(t, err, "insert rejects mailbox")
mreject.MailboxID = mbrejects.ID mreject.MailboxID = mbrejects.ID
mreject.MailboxOrigID = mbrejects.ID mreject.MailboxOrigID = mbrejects.ID
err = acc.DeliverMessage(pkglog, tx, &mreject, msgFile, true, false, false, true) err = acc.MessageAdd(pkglog, tx, &mbrejects, &mreject, msgFile, AddOpts{SkipSourceFileSync: true, SkipDirSync: true})
tcheck(t, err, "deliver message") tcheck(t, err, "deliver message")
err = tx.Get(&mbrejects)
tcheck(t, err, "get mbrejects")
mbrejects.Add(mreject.MailboxCounts())
err = tx.Update(&mbrejects) err = tx.Update(&mbrejects)
tcheck(t, err, "update mbrejects") tcheck(t, err, "update mbrejects")
@ -223,7 +215,7 @@ func TestMailbox(t *testing.T) {
}) })
tcheck(t, err, "write tx") tcheck(t, err, "write tx")
// todo: check that messages are removed and changes sent. // todo: check that messages are removed.
hasSpace, err := acc.TidyRejectsMailbox(log, "Rejects") hasSpace, err := acc.TidyRejectsMailbox(log, "Rejects")
tcheck(t, err, "tidy rejects mailbox") tcheck(t, err, "tidy rejects mailbox")
if !hasSpace { if !hasSpace {

View File

@ -50,6 +50,18 @@ func (a *Account) OpenJunkFilter(ctx context.Context, log mlog.Log) (*junk.Filte
return f, jf, err return f, jf, err
} }
func (a *Account) ensureJunkFilter(ctx context.Context, log mlog.Log, jfOpt *junk.Filter) (jf *junk.Filter, opened bool, err error) {
if jfOpt != nil {
return jfOpt, false, nil
}
jf, _, err = a.OpenJunkFilter(ctx, log)
if err != nil {
return nil, false, fmt.Errorf("open junk filter: %v", err)
}
return jf, true, nil
}
// RetrainMessages (un)trains messages, if relevant given their flags. Updates // RetrainMessages (un)trains messages, if relevant given their flags. Updates
// m.TrainedJunk after retraining. // m.TrainedJunk after retraining.
func (a *Account) RetrainMessages(ctx context.Context, log mlog.Log, tx *bstore.Tx, msgs []Message) (rerr error) { func (a *Account) RetrainMessages(ctx context.Context, log mlog.Log, tx *bstore.Tx, msgs []Message) (rerr error) {
@ -75,11 +87,11 @@ func (a *Account) RetrainMessages(ctx context.Context, log mlog.Log, tx *bstore.
return fmt.Errorf("open junk filter: %v", err) return fmt.Errorf("open junk filter: %v", err)
} }
defer func() { defer func() {
if jf != nil { if rerr != nil {
err := jf.Close() err := jf.CloseDiscard()
if rerr == nil { log.Check(err, "close junk filter without saving")
rerr = err } else {
} rerr = jf.Close()
} }
}() }()
} }

View File

@ -374,7 +374,9 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
}() }()
// Mailboxes we imported, and message counts. // Mailboxes we imported, and message counts.
mailboxes := map[string]store.Mailbox{} mailboxNames := map[string]*store.Mailbox{}
mailboxIDs := map[int64]*store.Mailbox{}
mailboxKeywordCounts := map[int64]int{}
messages := map[string]int{} messages := map[string]int{}
maxSize := acc.QuotaMessageSize() maxSize := acc.QuotaMessageSize()
@ -390,10 +392,6 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
mailboxKeywords := map[string]map[rune]string{} // Mailbox to 'a'-'z' to flag name. mailboxKeywords := map[string]map[rune]string{} // Mailbox to 'a'-'z' to flag name.
mailboxMissingKeywordMessages := map[string]map[int64]string{} // Mailbox to message id to string consisting of the unrecognized flags. mailboxMissingKeywordMessages := map[string]map[int64]string{} // Mailbox to message id to string consisting of the unrecognized flags.
// We keep the mailboxes we deliver to up to date with count and keywords (non-system flags).
destMailboxCounts := map[int64]store.MailboxCounts{}
destMailboxKeywords := map[int64]map[string]bool{}
// Previous mailbox an event was sent for. We send an event for new mailboxes, when // Previous mailbox an event was sent for. We send an event for new mailboxes, when
// another 100 messages were added, when adding a message to another mailbox, and // another 100 messages were added, when adding a message to another mailbox, and
// finally at the end as a closing statement. // finally at the end as a closing statement.
@ -434,32 +432,31 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
trainMessage(m, p, fmt.Sprintf("message id %d", m.ID)) trainMessage(m, p, fmt.Sprintf("message id %d", m.ID))
} }
xensureMailbox := func(name string) store.Mailbox { xensureMailbox := func(name string) *store.Mailbox {
name = norm.NFC.String(name) name = norm.NFC.String(name)
if strings.ToLower(name) == "inbox" { if strings.ToLower(name) == "inbox" {
name = "Inbox" name = "Inbox"
} }
if mb, ok := mailboxes[name]; ok { if mb, ok := mailboxNames[name]; ok {
return mb return mb
} }
var p string var p string
var mb store.Mailbox var mb *store.Mailbox
for i, e := range strings.Split(name, "/") { for i, e := range strings.Split(name, "/") {
if i == 0 { if i == 0 {
p = e p = e
} else { } else {
p = path.Join(p, e) p = path.Join(p, e)
} }
if _, ok := mailboxes[p]; ok { if _, ok := mailboxNames[p]; ok {
continue continue
} }
q := bstore.QueryTx[store.Mailbox](tx) q := bstore.QueryTx[store.Mailbox](tx)
q.FilterNonzero(store.Mailbox{Name: p}) q.FilterNonzero(store.Mailbox{Name: p})
var err error xmb, err := q.Get()
mb, err = q.Get()
if err == bstore.ErrAbsent { if err == bstore.ErrAbsent {
uidvalidity, err := acc.NextUIDValidity(tx) uidvalidity, err := acc.NextUIDValidity(tx)
ximportcheckf(err, "finding next uid validity") ximportcheckf(err, "finding next uid validity")
@ -470,7 +467,7 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
ximportcheckf(err, "assigning next modseq") ximportcheckf(err, "assigning next modseq")
} }
mb = store.Mailbox{ mb = &store.Mailbox{
Name: p, Name: p,
UIDValidity: uidvalidity, UIDValidity: uidvalidity,
UIDNext: 1, UIDNext: 1,
@ -479,28 +476,32 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
HaveCounts: true, HaveCounts: true,
// Do not assign special-use flags. This existing account probably already has such mailboxes. // Do not assign special-use flags. This existing account probably already has such mailboxes.
} }
err = tx.Insert(&mb) err = tx.Insert(mb)
ximportcheckf(err, "inserting mailbox in database") ximportcheckf(err, "inserting mailbox in database")
if tx.Get(&store.Subscription{Name: p}) != nil { if tx.Get(&store.Subscription{Name: p}) != nil {
err := tx.Insert(&store.Subscription{Name: p}) err := tx.Insert(&store.Subscription{Name: p})
ximportcheckf(err, "subscribing to imported mailbox") ximportcheckf(err, "subscribing to imported mailbox")
} }
changes = append(changes, store.ChangeAddMailbox{Mailbox: mb, Flags: []string{`\Subscribed`}, ModSeq: modseq}) changes = append(changes, store.ChangeAddMailbox{Mailbox: *mb, Flags: []string{`\Subscribed`}, ModSeq: modseq})
} else if err != nil { } else if err != nil {
ximportcheckf(err, "creating mailbox %s (aborting)", p) ximportcheckf(err, "creating mailbox %s (aborting)", p)
} else {
mb = &xmb
} }
if prevMailbox != "" && mb.Name != prevMailbox { if prevMailbox != "" && mb.Name != prevMailbox {
sendEvent("count", importCount{prevMailbox, messages[prevMailbox]}) sendEvent("count", importCount{prevMailbox, messages[prevMailbox]})
} }
mailboxes[mb.Name] = mb mailboxKeywordCounts[mb.ID] = len(mb.Keywords)
mailboxNames[mb.Name] = mb
mailboxIDs[mb.ID] = mb
sendEvent("count", importCount{mb.Name, 0}) sendEvent("count", importCount{mb.Name, 0})
prevMailbox = mb.Name prevMailbox = mb.Name
} }
return mb return mb
} }
xdeliver := func(mb store.Mailbox, m *store.Message, f *os.File, pos string) { xdeliver := func(mb *store.Mailbox, m *store.Message, f *os.File, pos string) {
defer store.CloseRemoveTempFile(log, f, "message file for import") defer store.CloseRemoveTempFile(log, f, "message file for import")
m.MailboxID = mb.ID m.MailboxID = mb.ID
m.MailboxOrigID = mb.ID m.MailboxOrigID = mb.ID
@ -518,19 +519,6 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
m.CreateSeq = modseq m.CreateSeq = modseq
m.ModSeq = modseq m.ModSeq = modseq
mc := destMailboxCounts[mb.ID]
mc.Add(m.MailboxCounts())
destMailboxCounts[mb.ID] = mc
if len(m.Keywords) > 0 {
if destMailboxKeywords[mb.ID] == nil {
destMailboxKeywords[mb.ID] = map[string]bool{}
}
for _, k := range m.Keywords {
destMailboxKeywords[mb.ID][k] = true
}
}
// Parse message and store parsed information for later fast retrieval. // Parse message and store parsed information for later fast retrieval.
p, err := message.EnsurePart(log.Logger, false, f, m.Size) p, err := message.EnsurePart(log.Logger, false, f, m.Size)
if err != nil { if err != nil {
@ -539,7 +527,7 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
m.ParsedBuf, err = json.Marshal(p) m.ParsedBuf, err = json.Marshal(p)
ximportcheckf(err, "marshal parsed message structure") ximportcheckf(err, "marshal parsed message structure")
// Set fields needed for future threading. By doing it now, DeliverMessage won't // Set fields needed for future threading. By doing it now, MessageAdd won't
// have to parse the Part again. // have to parse the Part again.
p.SetReaderAt(store.FileMsgReader(m.MsgPrefix, f)) p.SetReaderAt(store.FileMsgReader(m.MsgPrefix, f))
m.PrepareThreading(log, &p) m.PrepareThreading(log, &p)
@ -555,16 +543,19 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
// We set the flags that Deliver would set now and train ourselves. This prevents // We set the flags that Deliver would set now and train ourselves. This prevents
// Deliver from training, which would open the junk filter, change it, and write it // Deliver from training, which would open the junk filter, change it, and write it
// back to disk, for each message (slow). // back to disk, for each message (slow).
m.JunkFlagsForMailbox(mb, conf) m.JunkFlagsForMailbox(*mb, conf)
if jf != nil && m.NeedsTraining() { if jf != nil && m.NeedsTraining() {
trainMessage(m, p, pos) trainMessage(m, p, pos)
} }
const sync = false opts := store.AddOpts{
const notrain = true SkipDirSync: true,
const nothreads = true SkipTraining: true,
const updateDiskUsage = false SkipThreads: true,
if err := acc.DeliverMessage(log, tx, m, f, sync, notrain, nothreads, updateDiskUsage); err != nil { SkipUpdateDiskUsage: true,
SkipCheckQuota: true,
}
if err := acc.MessageAdd(log, tx, mb, m, f, opts); err != nil {
problemf("delivering message %s: %s (continuing)", pos, err) problemf("delivering message %s: %s (continuing)", pos, err)
return return
} }
@ -754,25 +745,17 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
err := tx.Get(&m) err := tx.Get(&m)
ximportcheckf(err, "get imported message for flag update") ximportcheckf(err, "get imported message for flag update")
mc := destMailboxCounts[m.MailboxID] mb := mailboxIDs[m.MailboxID]
mc.Sub(m.MailboxCounts()) mb.Sub(m.MailboxCounts())
oflags := m.Flags oflags := m.Flags
m.Flags = m.Flags.Set(flags, flags) m.Flags = m.Flags.Set(flags, flags)
m.Keywords = maps.Keys(keywords) m.Keywords = maps.Keys(keywords)
sort.Strings(m.Keywords) sort.Strings(m.Keywords)
mc.Add(m.MailboxCounts()) mb.Add(m.MailboxCounts())
destMailboxCounts[m.MailboxID] = mc
if len(m.Keywords) > 0 { mb.Keywords, _ = store.MergeKeywords(mb.Keywords, m.Keywords)
if destMailboxKeywords[m.MailboxID] == nil {
destMailboxKeywords[m.MailboxID] = map[string]bool{}
}
for _, k := range m.Keywords {
destMailboxKeywords[m.MailboxID][k] = true
}
}
// We train before updating, training may set m.TrainedJunk. // We train before updating, training may set m.TrainedJunk.
if jf != nil && m.NeedsTraining() { if jf != nil && m.NeedsTraining() {
@ -838,23 +821,12 @@ func importMessages(ctx context.Context, log mlog.Log, token string, acc *store.
} }
// Update mailboxes with counts and keywords. // Update mailboxes with counts and keywords.
for mbID, mc := range destMailboxCounts { for _, mb := range mailboxIDs {
mb := store.Mailbox{ID: mbID} err = tx.Update(mb)
err := tx.Get(&mb)
ximportcheckf(err, "loading mailbox for counts and keywords")
if mb.MailboxCounts != mc {
mb.MailboxCounts = mc
changes = append(changes, mb.ChangeCounts())
}
keywords := destMailboxKeywords[mb.ID]
var mbKwChanged bool
mb.Keywords, mbKwChanged = store.MergeKeywords(mb.Keywords, maps.Keys(keywords))
err = tx.Update(&mb)
ximportcheckf(err, "updating mailbox count and keywords") ximportcheckf(err, "updating mailbox count and keywords")
if mbKwChanged {
changes = append(changes, mb.ChangeCounts())
if len(mb.Keywords) != mailboxKeywordCounts[mb.ID] {
changes = append(changes, mb.ChangeKeywords()) changes = append(changes, mb.ChangeKeywords())
} }
} }

View File

@ -1099,24 +1099,18 @@ func (s server) Send(ctx context.Context, req webapi.SendRequest) (resp webapi.S
MsgPrefix: []byte(msgPrefix), MsgPrefix: []byte(msgPrefix),
} }
if ok, maxSize, err := acc.CanAddMessageSize(tx, sentm.Size); err != nil { err = acc.MessageAdd(log, tx, &sentmb, &sentm, dataFile, store.AddOpts{})
xcheckf(err, "checking quota") if err != nil && errors.Is(err, store.ErrOverQuota) {
} else if !ok { panic(webapi.Error{Code: "sentOverQuota", Message: fmt.Sprintf("message was sent, but not stored in sent mailbox: %v", err)})
panic(webapi.Error{Code: "sentOverQuota", Message: fmt.Sprintf("message was sent, but not stored in sent mailbox due to quota of total %d bytes reached", maxSize)}) } else if err != nil {
}
// Update mailbox before delivery, which changes uidnext.
sentmb.Add(sentm.MailboxCounts())
err = tx.Update(&sentmb)
xcheckf(err, "updating sent mailbox for counts")
err = acc.DeliverMessage(log, tx, &sentm, dataFile, true, false, false, true)
if err != nil {
metricSubmission.WithLabelValues("storesenterror").Inc() metricSubmission.WithLabelValues("storesenterror").Inc()
metricked = true metricked = true
} }
xcheckf(err, "message submitted to queue, appending message to Sent mailbox") xcheckf(err, "message submitted to queue, appending message to Sent mailbox")
err = tx.Update(&sentmb)
xcheckf(err, "updating mailbox")
changes = append(changes, sentm.ChangeAddUID(), sentmb.ChangeCounts()) changes = append(changes, sentm.ChangeAddUID(), sentmb.ChangeCounts())
}) })

View File

@ -451,20 +451,15 @@ func (w Webmail) MessageCompose(ctx context.Context, m ComposeMessage, mailboxID
Size: xc.Size, Size: xc.Size,
} }
if ok, maxSize, err := acc.CanAddMessageSize(tx, nm.Size); err != nil { err = acc.MessageAdd(log, tx, &mb, &nm, dataFile, store.AddOpts{})
xcheckf(ctx, err, "checking quota") if err != nil && errors.Is(err, store.ErrOverQuota) {
} else if !ok { xcheckuserf(ctx, err, "checking quota")
xcheckuserf(ctx, fmt.Errorf("account over maximum total message size %d", maxSize), "checking quota")
} }
xcheckf(ctx, err, "storing message in mailbox")
// Update mailbox before delivery, which changes uidnext.
mb.Add(nm.MailboxCounts())
err = tx.Update(&mb) err = tx.Update(&mb)
xcheckf(ctx, err, "updating sent mailbox for counts") xcheckf(ctx, err, "updating sent mailbox for counts")
err = acc.DeliverMessage(log, tx, &nm, dataFile, true, false, false, true)
xcheckf(ctx, err, "storing message in mailbox")
changes = append(changes, nm.ChangeAddUID(), mb.ChangeCounts()) changes = append(changes, nm.ChangeAddUID(), mb.ChangeCounts())
}) })
@ -1027,6 +1022,16 @@ func (w Webmail) MessageSubmit(ctx context.Context, m SubmitMessage) {
panic(x) panic(x)
} }
}() }()
var deliveredIDs []int64
defer func() {
for _, id := range deliveredIDs {
p := acc.MessagePath(id)
err := os.Remove(p)
log.Check(err, "removing delivered message on error", slog.String("path", p))
}
}()
xdbwrite(ctx, acc, func(tx *bstore.Tx) { xdbwrite(ctx, acc, func(tx *bstore.Tx) {
if m.DraftMessageID > 0 { if m.DraftMessageID > 0 {
nchanges := xops.MessageDeleteTx(ctx, log, tx, acc, []int64{m.DraftMessageID}, &modseq) nchanges := xops.MessageDeleteTx(ctx, log, tx, acc, []int64{m.DraftMessageID}, &modseq)
@ -1122,26 +1127,22 @@ func (w Webmail) MessageSubmit(ctx context.Context, m SubmitMessage) {
MsgPrefix: []byte(msgPrefix), MsgPrefix: []byte(msgPrefix),
} }
if ok, maxSize, err := acc.CanAddMessageSize(tx, sentm.Size); err != nil { err = acc.MessageAdd(log, tx, &sentmb, &sentm, dataFile, store.AddOpts{})
xcheckf(ctx, err, "checking quota") if err != nil && errors.Is(err, store.ErrOverQuota) {
} else if !ok { xcheckuserf(ctx, err, "checking quota")
xcheckuserf(ctx, fmt.Errorf("account over maximum total message size %d", maxSize), "checking quota") } else if err != nil {
}
// Update mailbox before delivery, which changes uidnext.
sentmb.Add(sentm.MailboxCounts())
err = tx.Update(&sentmb)
xcheckf(ctx, err, "updating sent mailbox for counts")
err = acc.DeliverMessage(log, tx, &sentm, dataFile, true, false, false, true)
if err != nil {
metricSubmission.WithLabelValues("storesenterror").Inc() metricSubmission.WithLabelValues("storesenterror").Inc()
metricked = true metricked = true
} }
xcheckf(ctx, err, "message submitted to queue, appending message to Sent mailbox") xcheckf(ctx, err, "message submitted to queue, appending message to Sent mailbox")
deliveredIDs = append(deliveredIDs, sentm.ID)
err = tx.Update(&sentmb)
xcheckf(ctx, err, "updating sent mailbox for counts")
changes = append(changes, sentm.ChangeAddUID(), sentmb.ChangeCounts()) changes = append(changes, sentm.ChangeAddUID(), sentmb.ChangeCounts())
}) })
deliveredIDs = nil
store.BroadcastChanges(acc, changes) store.BroadcastChanges(acc, changes)
}) })
@ -1226,7 +1227,7 @@ func (Webmail) MailboxCreate(ctx context.Context, name string) {
xdbwrite(ctx, acc, func(tx *bstore.Tx) { xdbwrite(ctx, acc, func(tx *bstore.Tx) {
var exists bool var exists bool
var err error var err error
changes, _, exists, err = acc.MailboxCreate(tx, name, store.SpecialUse{}) _, changes, _, exists, err = acc.MailboxCreate(tx, name, store.SpecialUse{})
if exists { if exists {
xcheckuserf(ctx, errors.New("mailbox already exists"), "creating mailbox") xcheckuserf(ctx, errors.New("mailbox already exists"), "creating mailbox")
} }

View File

@ -434,7 +434,7 @@ func TestView(t *testing.T) {
ChangeMailboxKeywords: store.ChangeMailboxKeywords{ ChangeMailboxKeywords: store.ChangeMailboxKeywords{
MailboxID: inbox.ID, MailboxID: inbox.ID,
MailboxName: inbox.Name, MailboxName: inbox.Name,
Keywords: []string{`aaa`, `changelabel`}, Keywords: []string{`aaa`, `changelabel`, `testlabel`},
}, },
}) })
chmbcounts.Size = 0 chmbcounts.Size = 0