This commit is contained in:
Mechiel Lukkien
2023-01-30 14:27:06 +01:00
commit cb229cb6cf
1256 changed files with 491723 additions and 0 deletions

3
vendor/github.com/mjl-/bstore/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
/cover.out
/cover.html
/testdata/*.db

7
vendor/github.com/mjl-/bstore/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2022 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

20
vendor/github.com/mjl-/bstore/Makefile generated vendored Normal file
View File

@ -0,0 +1,20 @@
# build compiles the package, vets it (including a 32-bit pass via GOARCH=386),
# runs staticcheck and regenerates docs via gendoc.sh.
build:
	go build ./...
	go vet ./...
	GOARCH=386 go vet ./...
	staticcheck ./...
	./gendoc.sh

# fmt formats all Go source files, including the bstore command.
fmt:
	go fmt ./...
	gofmt -w -s *.go cmd/bstore/*.go

# test runs the tests with race detector and shuffled order, and writes
# an HTML coverage report to cover.html.
test:
	go test -race -shuffle=on -coverprofile cover.out
	go tool cover -html=cover.out -o cover.html

benchmark:
	go test -bench .

fuzz:
	go test -fuzz .

51
vendor/github.com/mjl-/bstore/README.md generated vendored Normal file
View File

@ -0,0 +1,51 @@
bstore is a database library for storing and querying Go struct data.
See https://pkg.go.dev/github.com/mjl-/bstore
MIT-licensed
# Comparison
Bstore is designed as a small, pure Go library that still provides most of the
common data consistency requirements for modest database use cases. Bstore aims
to make basic use of cgo-based libraries, such as sqlite, unnecessary. Sqlite
is a great library, but Go applications that require cgo are hard to
cross-compile. With bstore, cross-compiling to most Go-supported platforms
stays trivial. Although bstore is much more limited than sqlite in many
aspects, it also offers some advantages.
- Cross-compilation and reproducibility: Trivial with bstore due to pure Go,
much harder with sqlite because of cgo.
- Code complexity: low with bstore (6k lines including comments/docs), high
with sqlite.
- Query language: mostly-type-checked function calls in bstore, free-form query
strings only checked at runtime with sqlite.
- Functionality: very limited with bstore, much more full-featured with sqlite.
- Schema management: mostly automatic based on Go type definitions in bstore,
manual with ALTER statements in sqlite.
- Types and packing/parsing: automatic/transparent in bstore based on Go types
(including maps, slices, structs and custom MarshalBinary encoding), versus
manual scanning and parameter passing with sqlite with limited set of SQL
types.
- Performance: low to good performance with bstore, high performance with
sqlite.
- Database files: single file with bstore, several files with sqlite (due to
WAL or journal files).
- Test coverage: decent coverage but limited real-world for bstore, versus
extremely thoroughly tested and with enormous real-world use.
# FAQ
Q: Is bstore an ORM?
A: No. The API for bstore may look like an ORM. But instead of mapping bstore
"queries" (function calls) to an SQL query string, bstore executes them
directly without converting to a query language.
Q: How does bstore store its data?
A: A bstore database is a single-file BoltDB database. BoltDB provides ACID
properties. Bstore uses a BoltDB "bucket" (key/value store) for each Go type
stored, with multiple subbuckets: one for type definitions, one for the actual
data, and one bucket per index. BoltDB stores data in a B+tree. See format.md
for details.

80
vendor/github.com/mjl-/bstore/default.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package bstore
import (
"fmt"
"reflect"
"time"
)
// zerotime is the zero time.Time; a "default now" is stored as this zero value
// (see field.applyDefault).
var zerotime = time.Time{}

// applyDefault replaces zero values for fields that have a Default value configured.
func (tv *typeVersion) applyDefault(rv reflect.Value) error {
	// Fields[0] is the primary key, which never gets a default applied.
	for _, f := range tv.Fields[1:] {
		fv := rv.FieldByIndex(f.structField.Index)
		if err := f.applyDefault(fv); err != nil {
			return err
		}
	}
	return nil
}
// applyDefault replaces a zero value in rv with this field's configured
// default value, recursing into slices and structs for nested fields.
func (f field) applyDefault(rv reflect.Value) error {
	switch f.Type.Kind {
	case kindBytes, kindBinaryMarshal, kindMap:
		// These kinds do not support defaults and are not recursed into.
		return nil

	case kindSlice, kindStruct:
		return f.Type.applyDefault(rv)

	case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64, kindFloat32, kindFloat64, kindString, kindTime:
		// Only apply a default when one is configured and the current value is zero.
		if !f.defaultValue.IsValid() || !rv.IsZero() {
			return nil
		}
		fv := f.defaultValue
		// Time is special. "now" is encoded as the zero value of time.Time.
		if f.Type.Kind == kindTime && fv.Interface() == zerotime {
			// Round(0) strips the monotonic clock reading.
			now := time.Now().Round(0)
			if f.Type.Ptr {
				fv = reflect.ValueOf(&now)
			} else {
				fv = reflect.ValueOf(now)
			}
		} else if f.Type.Ptr {
			// Allocate a fresh pointer so callers don't share the default value.
			fv = reflect.New(f.structField.Type.Elem())
			fv.Elem().Set(f.defaultValue)
		}
		rv.Set(fv)
		return nil

	default:
		return fmt.Errorf("internal error: missing case for %v", f.Type.Kind)
	}
}
// applyDefault recurses into slice elements and struct fields to apply
// configured defaults. Only for recursing. We do not support recursing into
// maps because it would involve more work making values settable. And how
// sensible is it anyway?
func (ft fieldType) applyDefault(rv reflect.Value) error {
	// Nil pointers are left alone; non-nil pointers are dereferenced first.
	if ft.Ptr && (rv.IsZero() || rv.IsNil()) {
		return nil
	} else if ft.Ptr {
		rv = rv.Elem()
	}
	switch ft.Kind {
	case kindSlice:
		n := rv.Len()
		for i := 0; i < n; i++ {
			if err := ft.List.applyDefault(rv.Index(i)); err != nil {
				return err
			}
		}
	case kindStruct:
		for _, nf := range ft.Fields {
			nfv := rv.FieldByIndex(nf.structField.Index)
			if err := nf.applyDefault(nfv); err != nil {
				return err
			}
		}
	}
	return nil
}

142
vendor/github.com/mjl-/bstore/doc.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
/*
Package bstore is a database library for storing and querying Go struct data.
Bstore is designed as a small, pure Go library that still provides most of
the common data consistency requirements for modest database use cases. Bstore
aims to make basic use of cgo-based libraries, such as sqlite, unnecessary.
Bstore implements autoincrementing primary keys, indices, default values,
enforcement of nonzero, unique and referential integrity constraints, automatic
schema updates and a query API for combining filters/sorting/limits. Queries
are planned and executed using indices for fast execution where possible.
Bstore is designed with the Go type system in mind: you typically don't have to
write any (un)marshal code for your types.
# Field types
Struct field types currently supported for storing, including pointers to these
types, but not pointers to pointers:
- int (as int32), int8, int16, int32, int64
- uint (as uint32), uint8, uint16, uint32, uint64
- bool, float32, float64, string, []byte
- Maps, with keys and values of any supported type, except keys with pointer types.
- Slices, with elements of any supported type.
- time.Time
- Types that implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler,
useful for struct types with state in private fields. Do not change the
(Un)MarshalBinary method in an incompatible way without a data migration.
- Structs, with fields of any supported type.
Note: int and uint are stored as int32 and uint32, for compatibility of database
files between 32bit and 64bit systems. Where possible, use explicit (u)int32 or
(u)int64 types.
Embedded structs are handled by storing the individual fields of the embedded
struct. The named embedded type is not part of the type schema, and can
currently only be used with UpdateField and UpdateFields, not for filtering.
Bstore embraces the use of Go zero values. Use zero values, possibly pointers,
where you would use NULL values in SQL.
Types that have not yet been implemented: interface values, (fixed length) arrays,
complex numbers.
# Struct tags
The typical Go struct can be stored in the database. The first field of a
struct type is its primary key, and must always be unique. Additional behaviour
can be configured through struct tag "bstore". The values are comma-separated.
Typically one word, but some have multiple space-separated words:
- "-" ignores the field entirely.
- "name <fieldname>", use "fieldname" instead of the Go type field name.
- "nonzero", enforces that field values are not the zero value.
- "noauto", only valid for integer types, and only for the primary key. By
default, an integer-typed primary key will automatically get a next value
assigned on insert when it is 0. With noauto inserting a 0 value results in an
error. For primary keys of other types inserting the zero value always results
in an error.
- "index" or "index <field1+field2+...> [<name>]", adds an index. In the first
form, the index is on the field on which the tag is specified, and the index
name is the same as the field name. In the second form multiple fields can be
specified, and an optional name. The first field must be the field on which
the tag is specified. The field names are +-separated. The default name for
the second form is the same +-separated string but can be set explicitly to
the second parameter. An index can only be set for basic integer types, bools,
time and strings. Indices are automatically (re)created when registering a
type.
- "unique" or "unique <field1+field2+...> [<name>]", adds an index as with
"index" and also enforces a unique constraint. For time.Time the timezone is
ignored for the uniqueness check.
- "ref <type>", enforces that the value exists as primary key for "type".
Field types must match exactly, e.g. you cannot reference an int with an int64.
An index is automatically created and maintained for fields with a foreign key,
for efficiently checking that removed records in the referenced type are not in
use. If the field has the zero value, the reference is not checked. If you
require a valid reference, add "nonzero".
- "default <value>", replaces a zero value with the specified value on record
insert. Special value "now" is recognized for time.Time as the current time.
Times are parsed as time.RFC3339 otherwise. Supported types: bool
("true"/"false"), integers, floats, strings. Value is not quoted and no escaping
of special characters, like the comma that separates struct tag words, is
possible. Defaults are also replaced on fields in nested structs and
slices, but not in maps.
- "typename <name>", override name of the type. The name of the Go type is
used by default. Can only be present on the first field (primary key).
Useful for doing schema updates.
# Schema updates
Before using a Go type, you must register it for use with the open database by
passing a (zero) value of that type to the Open or Register functions. For each
type, a type definition is stored in the database. If a type has an updated
definition since the previous database open, a new type definition is added to
the database automatically and any required modifications are made: Indexes
(re)created, fields added/removed, new nonzero/unique/reference constraints
validated.
If data/types cannot be updated automatically (e.g. converting an int field into
a string field), custom data migration code is needed. You may have to keep
track of a data/schema version.
As a special case, you can switch field types between pointer and non-pointer
types. With one exception: changing from pointer to non-pointer where the type
has a field that must be nonzero is not allowed. The on-disk encoding will not be
changed, and nil pointers will turn into zero values, and zero values into nil
pointers. Also see section Limitations about pointer types.
Because named embed structs are not part of the type definition, you can
wrap/unwrap fields into an embed/anonymous struct field. No new type definition
is created.
# BoltDB
BoltDB is used as underlying storage. Bolt provides ACID transactions, storing
its data in a B+tree. Only a single write transaction can be active at a time,
but otherwise multiple read-only transactions can be active. Do not start a
blocking read-only transaction while holding a writable transaction or vice
versa, this will cause deadlock.
Bolt uses Go types that are memory mapped to the database file. This means bolt
database files cannot be transferred between machines with different endianness.
Bolt uses explicit widths for its types, so files can be transferred between
32bit and 64bit machines of same endianness.
# Limitations
Bstore does not implement the equivalent of SQL joins, aggregates, and many
other concepts.
Filtering/comparing/sorting on pointer fields is not currently allowed. Pointer
fields cannot have a (unique) index due to the current index format. Using zero
values is recommended instead for now.
Integer field types can be expanded to wider types, but not to a different
signedness or a smaller integer (fewer bits). The primary key of a type cannot
currently be changed.
The first field of a struct is always the primary key. Types require an
explicit primary key. Autoincrement is only available for the primary key.
*/
package bstore

91
vendor/github.com/mjl-/bstore/equal.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package bstore
import (
"bytes"
"encoding"
"reflect"
"time"
)
// equal checks if ov and v are the same as far as storage is concerned. i.e.
// this only takes stored fields into account. reflect.DeepEqual cannot be used,
// it would take all fields into account, including unexported.
func (tv *typeVersion) equal(ov, v reflect.Value) (r bool) {
	if !ov.IsValid() || !v.IsValid() {
		return false
	}
	// Compare each stored field of this type version; any difference means unequal.
	for _, f := range tv.Fields {
		fov := ov.FieldByIndex(f.structField.Index)
		fv := v.FieldByIndex(f.structField.Index)
		if !f.Type.equal(fov, fv) {
			return false
		}
	}
	return true
}
// equal reports whether ov and v hold the same stored value for this field
// type, comparing only what storage would see.
func (ft fieldType) equal(ov, v reflect.Value) (r bool) {
	// Fast path: identical reflect.Value descriptors.
	if ov == v {
		return true
	} else if !ov.IsValid() || !v.IsValid() {
		return false
	}
	if ft.Ptr {
		ov = ov.Elem()
		v = v.Elem()
	}
	// Re-check after dereferencing pointers; an invalid value means a nil pointer.
	if ov == v {
		return true
	} else if !ov.IsValid() || !v.IsValid() {
		return false
	}
	switch ft.Kind {
	case kindBytes:
		return bytes.Equal(ov.Bytes(), v.Bytes())
	case kindMap:
		on := ov.Len()
		n := v.Len()
		if on != n {
			return false
		}
		// Same length, so checking every old key exists in v with an equal
		// value suffices.
		r := ov.MapRange()
		for r.Next() {
			vv := v.MapIndex(r.Key())
			if !vv.IsValid() || !ft.MapValue.equal(r.Value(), vv) {
				return false
			}
		}
		return true
	case kindSlice:
		on := ov.Len()
		n := v.Len()
		if on != n {
			return false
		}
		for i := 0; i < n; i++ {
			if !ft.List.equal(ov.Index(i), v.Index(i)) {
				return false
			}
		}
		return true
	case kindTime:
		// time.Time.Equal compares instants, ignoring location differences.
		return ov.Interface().(time.Time).Equal(v.Interface().(time.Time))
	case kindBinaryMarshal:
		// Compare the marshaled forms, since that is what gets stored.
		obuf, oerr := ov.Interface().(encoding.BinaryMarshaler).MarshalBinary()
		buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
		if oerr != nil || err != nil {
			return false // todo: should propagate error?
		}
		return bytes.Equal(obuf, buf)
	case kindStruct:
		for _, f := range ft.Fields {
			fov := ov.FieldByIndex(f.structField.Index)
			fv := v.FieldByIndex(f.structField.Index)
			if !f.Type.equal(fov, fv) {
				return false
			}
		}
		return true
	}
	// Remaining basic kinds (bool, integers, floats, string) compare directly.
	return ov.Interface() == v.Interface()
}

568
vendor/github.com/mjl-/bstore/exec.go generated vendored Normal file
View File

@ -0,0 +1,568 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
"sort"
"time"
bolt "go.etcd.io/bbolt"
)
// exec represents the execution of a query plan.
type exec[T any] struct {
	q    *Query[T] // Query this execution belongs to.
	plan *plan[T]  // Plan being executed.

	// For queries with explicit PKs filtered on.
	// See plan.keys. We remove items from the list when we looked one up, but we keep the slice non-nil.
	keys [][]byte

	// If -1, no limit is set. This is different from Query where 0 means
	// no limit. We count back and 0 means the end.
	limit int

	data []pair[T] // If not nil (even if empty), serve nextKey requests from here.

	ib *bolt.Bucket // Index bucket, set on first use when the plan uses an index.
	rb *bolt.Bucket // Records bucket for the type, set on first use.

	forward func() (bk, bv []byte) // Once we start scanning, we prepare forward to next/prev to the following value.
}
// exec creates a new execution for the plan, registering statistics.
func (p *plan[T]) exec(q *Query[T]) *exec[T] {
	q.stats.Queries++
	// Record which kind of plan this is, for stats.
	if p.idx == nil {
		// Plan uses the primary key.
		if p.keys != nil {
			q.stats.PlanPK++
		} else if p.start != nil || p.stop != nil {
			q.stats.PlanPKScan++
		} else {
			q.stats.PlanTableScan++
		}
		q.stats.LastIndex = ""
	} else {
		// Plan uses an index.
		if p.keys != nil {
			q.stats.PlanUnique++
		} else {
			q.stats.PlanIndexScan++
		}
		q.stats.LastIndex = p.idx.Name
	}
	if len(p.orders) > 0 {
		q.stats.Sort++
	}
	q.stats.LastOrdered = p.start != nil || p.stop != nil
	q.stats.LastAsc = !p.desc

	// Internally -1 means "no limit"; Query uses 0 for that.
	limit := -1
	if q.xlimit > 0 {
		limit = q.xlimit
	}
	return &exec[T]{q: q, plan: p, keys: p.keys, limit: limit}
}
// incr interprets buf as a big-endian unsigned number and adds one to it in
// place. It reports false when the value wraps around (every byte was 0xff),
// meaning there is no larger value of this width. Used for reverse scans,
// where we must start just beyond the key prefix we are looking for.
func incr(buf []byte) bool {
	i := len(buf)
	for i > 0 {
		i--
		buf[i]++
		if buf[i] != 0 {
			return true
		}
	}
	return false
}
// cutoff returns b truncated to at most n bytes. Shorter slices are returned
// unchanged; the result aliases b.
func cutoff(b []byte, n int) []byte {
	if len(b) > n {
		return b[:n]
	}
	return b
}
// nextKey returns the key and optionally value for the next selected record.
//
// ErrAbsent is returned if there is no more record.
//
// If an error occurs, an error is set on query, except in the case of
// ErrAbsent. ErrAbsent does not finish the query because a Delete or Update
// could follow.
func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
	var zero T

	q := e.q

	if q.err != nil {
		return nil, zero, q.err
	}

	// We collected & sorted data previously. Return from it until done.
	// Limit was already applied.
	if e.data != nil {
		if len(e.data) == 0 {
			return nil, zero, ErrAbsent
		}
		p := e.data[0]
		e.data = e.data[1:]
		var v T
		if value {
			var err error
			v, err = p.Value(e)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}
		return p.bk, v, nil
	}

	if e.limit == 0 {
		return nil, zero, ErrAbsent
	}

	// First time we are going to need buckets.
	if e.rb == nil {
		tx, err := q.tx(write)
		if err != nil {
			q.error(err)
			return nil, zero, err
		}
		e.rb, err = tx.recordsBucket(q.st.Name, q.st.Current.fillPercent)
		if err != nil {
			return nil, zero, err
		}
		if e.plan.idx != nil {
			e.ib, err = tx.indexBucket(e.plan.idx)
			if err != nil {
				return nil, zero, err
			}
		}
	}

	// List of IDs (records) or full unique index equality match.
	// We can get the records/index value by a simple "get" on the key.
	if e.keys != nil {
		collect := len(e.plan.orders) > 0
		if collect {
			e.data = []pair[T]{} // Must be non-nil to get into e.data branch!
		}
		for i, xk := range e.keys {
			var bk, bv []byte

			// For indices, we need look up the PK through the index.
			if e.plan.idx != nil {
				c := e.ib.Cursor()
				q.stats.Index.Cursor++
				bki, _ := c.Seek(xk)
				if !bytes.HasPrefix(bki, xk) {
					continue
				}
				// log.Printf("seek %x, bki %x", xk, bki)
				// The PK is the remainder of the index key after the indexed values.
				bk = bki[len(xk):]
			} else {
				bk = xk
			}

			// We don't need to fetch the full record now if it isn't needed by
			// caller. It may be fetched below for more filters.
			if value || e.plan.idx == nil {
				q.stats.Records.Get++
				bv = e.rb.Get(bk)
				if bv == nil {
					if e.plan.idx != nil {
						return nil, zero, fmt.Errorf("%w: record with pk %x referenced through index %q not found", ErrStore, bk, e.plan.idx.Name)
					}
					continue
				}
			}
			p := pair[T]{bk, bv, nil}
			if ok, err := e.checkFilter(&p); err != nil {
				return nil, zero, err
			} else if !ok {
				continue
			}

			if collect {
				e.data = append(e.data, p)
				continue
			}
			// Again, only fetch value if needed.
			var v T
			if value {
				var err error
				v, err = p.Value(e)
				if err != nil {
					q.error(err)
					return nil, zero, err
				}
			}

			if e.limit > 0 {
				e.limit--
			}

			// Drop the keys we have handled, including this one.
			e.keys = e.keys[i+1:]
			return bk, v, nil
		}
		if !collect {
			return nil, zero, ErrAbsent
		}
		// Restart, now with data.
		e.keys = [][]byte{}
		e.sort()
		if e.limit > 0 && len(e.data) > e.limit {
			e.data = e.data[:e.limit]
		}
		// NOTE(review): this restart goes through q.nextKey while the scan
		// restart at the end of this function calls e.nextKey directly --
		// presumably equivalent (q should delegate back to this exec); confirm.
		return q.nextKey(write, value)
	}

	// We are going to do a scan, either over the records or an index. We may have a start and stop key.
	collect := len(e.plan.orders) > 0
	if collect {
		e.data = []pair[T]{} // Must be non-nil to get into e.data branch on function restart.
	}
	for {
		var xk, xv []byte
		if e.forward == nil {
			// First time we are in this loop, we set up a cursor and e.forward.
			var c *bolt.Cursor
			var statsKV *StatsKV
			if e.plan.idx == nil {
				c = e.rb.Cursor()
				statsKV = &q.stats.Records
			} else {
				c = e.ib.Cursor()
				statsKV = &q.stats.Index
			}
			if !e.plan.desc {
				e.forward = c.Next
				if e.plan.start != nil {
					statsKV.Cursor++
					// If e.plan.start does not exist, seek will skip to the
					// next value after. Fine because this is ascending order.
					xk, xv = c.Seek(e.plan.start)
				} else {
					statsKV.Cursor++
					xk, xv = c.First()
				}
			} else {
				e.forward = c.Prev
				if e.plan.start == nil {
					statsKV.Cursor++
					xk, xv = c.Last()
				} else {
					// Descending with a start key: seek to just past the start
					// prefix, then walk back to the first matching key.
					start := make([]byte, len(e.plan.start))
					copy(start, e.plan.start)
					ok := incr(start)
					if !ok {
						statsKV.Cursor++
						// We were at the last representable value. So we simply start at the end.
						xk, xv = c.Last()
					} else {
						statsKV.Cursor++
						xk, xv = c.Seek(start)
						if xk == nil {
							statsKV.Cursor++
							xk, xv = c.Last()
						}
						// We started at the value after where we were requested to start, so we have to
						// move until we find a matching key.
						// todo: we could take e.plan.stop into account (if set). right now we may be
						// seeking all the way to the front without ever seeing a match to stop.
						for xk != nil && bytes.Compare(cutoff(xk, len(e.plan.start)), e.plan.start) > 0 {
							statsKV.Cursor++
							xk, xv = e.forward()
						}
					}
				}
			}
		} else {
			if e.plan.idx == nil {
				q.stats.Records.Cursor++
			} else {
				q.stats.Index.Cursor++
			}
			xk, xv = e.forward()
			// log.Printf("forwarded, %x %x", xk, xv)
		}

		if xk == nil {
			break
		}

		// Skip the exact start key when the range start is exclusive.
		if e.plan.start != nil && !e.plan.startInclusive && bytes.HasPrefix(xk, e.plan.start) {
			continue
		}
		// Stop when we moved past the stop key, direction-dependent.
		if e.plan.stop != nil {
			cmp := bytes.Compare(cutoff(xk, len(e.plan.stop)), e.plan.stop)
			if !e.plan.desc && (e.plan.stopInclusive && cmp > 0 || !e.plan.stopInclusive && cmp >= 0) {
				break
			} else if e.plan.desc && (e.plan.stopInclusive && cmp < 0 || !e.plan.stopInclusive && cmp <= 0) {
				break
			}
		}

		var pk, bv []byte
		if e.plan.idx == nil {
			pk = xk
			bv = xv
		} else {
			// Index scan: extract the PK from the index key.
			var err error
			pk, _, err = e.plan.idx.parseKey(xk, false)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}

		p := pair[T]{pk, bv, nil}
		if ok, err := e.checkFilter(&p); err != nil {
			return nil, zero, err
		} else if !ok {
			continue
		}
		//log.Printf("have kv, %x %x", p.bk, p.bv)
		var v T
		var err error
		if value {
			v, err = p.Value(e)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}
		if collect {
			e.data = append(e.data, p)
			continue
		}
		if e.limit > 0 {
			e.limit--
		}
		return p.bk, v, nil
	}
	if !collect {
		return nil, zero, ErrAbsent
	}
	// Restart, now with data.
	e.sort()
	if e.limit > 0 && len(e.data) > e.limit {
		e.data = e.data[:e.limit]
	}
	return e.nextKey(write, value)
}
// checkFilter checks against the filters for the plan. The naked returns
// yield the named results (false, nil): record does not match, no error.
func (e *exec[T]) checkFilter(p *pair[T]) (rok bool, rerr error) {
	q := e.q
	for _, ff := range e.plan.filters {
		switch f := ff.(type) {
		// note: filterIDs is not here, it is handled earlier to fetch records.
		case filterFn[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			if !f.fn(v) {
				return
			}
		case filterEqual[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			if !f.field.Type.equal(frv, f.rvalue) {
				return
			}
		case filterNotEqual[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			if f.field.Type.equal(frv, f.rvalue) {
				return
			}
		case filterIn[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			// Must match at least one of the candidate values.
			var have bool
			for _, xrv := range f.rvalues {
				if f.field.Type.equal(frv, xrv) {
					have = true
					break
				}
			}
			if !have {
				return
			}
		case filterNotIn[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			// Must match none of the candidate values.
			for _, xrv := range f.rvalues {
				if f.field.Type.equal(frv, xrv) {
					return
				}
			}
		case filterCompare[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			fv := rv.FieldByIndex(f.field.structField.Index)
			cmp := compare(f.field.Type.Kind, fv, f.value)
			// Pass when the comparison outcome matches the operator;
			// otherwise the record is filtered out.
			switch {
			case cmp == 0 && (f.op == opGreaterEqual || f.op == opLessEqual):
			case cmp < 0 && (f.op == opLess || f.op == opLessEqual):
			case cmp > 0 && (f.op == opGreater || f.op == opGreaterEqual):
			default:
				return
			}
		default:
			q.errorf("internal error: missing case for filter %T", ff)
			return false, q.err
		}
	}
	return true, nil
}
// comparable reports whether values of this field type can be ordered, e.g.
// for greater/less comparison with filterCompare. Pointer fields are never
// comparable.
func comparable(ft fieldType) bool {
	switch ft.Kind {
	case kindBytes, kindString, kindBool,
		kindInt8, kindInt16, kindInt32, kindInt64, kindInt,
		kindUint8, kindUint16, kindUint32, kindUint64, kindUint,
		kindFloat32, kindFloat64, kindTime:
		return !ft.Ptr
	default:
		return false
	}
}
// compare orders two reflect values of the given kind, returning a negative,
// zero or positive result like bytes.Compare. Only kinds accepted by
// "comparable" are ordered; any other kind yields 0.
func compare(k kind, a, b reflect.Value) int {
	// ord maps a pair of ordering predicates to -1/0/1.
	ord := func(less, greater bool) int {
		if less {
			return -1
		}
		if greater {
			return 1
		}
		return 0
	}

	switch k {
	case kindBytes:
		return bytes.Compare(a.Bytes(), b.Bytes())

	case kindString:
		x, y := a.String(), b.String()
		return ord(x < y, x > y)

	case kindBool:
		// false orders before true.
		x, y := a.Bool(), b.Bool()
		return ord(!x && y, x && !y)

	case kindInt8, kindInt16, kindInt32, kindInt64, kindInt:
		x, y := a.Int(), b.Int()
		return ord(x < y, x > y)

	case kindUint8, kindUint16, kindUint32, kindUint64, kindUint:
		x, y := a.Uint(), b.Uint()
		return ord(x < y, x > y)

	case kindFloat32, kindFloat64:
		x, y := a.Float(), b.Float()
		return ord(x < y, x > y)

	case kindTime:
		x := a.Interface().(time.Time)
		y := b.Interface().(time.Time)
		return ord(x.Before(y), x.After(y))
	}

	// todo: internal error, cannot happen
	return 0
}
// sort sorts the collected e.data in place according to the plan's orderings.
// On a parse error, the error is set on the query and the data is left as-is.
func (e *exec[T]) sort() {
	// todo: We should check whether we actually need to load values. We just
	// always do it for now for the time being because SortStableFunc isn't going to
	// give us a *pair (even though it could because of the slice) so we
	// couldn't set/cache the value T during sorting.
	q := e.q

	// Ensure all values are parsed/cached before sorting; the comparison
	// function below reads fields from *pair.value.
	for i := range e.data {
		p := &e.data[i]
		if p.value != nil {
			continue
		}
		_, err := p.Value(e)
		if err != nil {
			q.error(err)
			return
		}
	}

	sort.SliceStable(e.data, func(i, j int) bool {
		a := e.data[i]
		b := e.data[j]
		// Walk the orderings in sequence; the first differing field decides.
		for _, o := range e.plan.orders {
			ra := reflect.ValueOf(*a.value)
			rb := reflect.ValueOf(*b.value)
			rva := ra.FieldByIndex(o.field.structField.Index)
			rvb := rb.FieldByIndex(o.field.structField.Index)
			cmp := compare(o.field.Type.Kind, rva, rvb)
			if cmp == 0 {
				continue
			}
			return cmp < 0 && o.asc || cmp > 0 && !o.asc
		}
		return false
	})
}

387
vendor/github.com/mjl-/bstore/export.go generated vendored Normal file
View File

@ -0,0 +1,387 @@
package bstore
import (
"fmt"
"math"
"reflect"
"strconv"
"time"
bolt "go.etcd.io/bbolt"
)
// Types returns the types present in the database, regardless of whether they
// are currently registered using Open or Register. Useful for exporting data
// with Keys and Records.
func (db *DB) Types() ([]string, error) {
	var names []string
	collect := func(tx *Tx) error {
		return tx.btx.ForEach(func(bname []byte, b *bolt.Bucket) error {
			// note: we do not track stats for types operations.
			names = append(names, string(bname))
			return nil
		})
	}
	if err := db.Read(collect); err != nil {
		return nil, err
	}
	return names, nil
}
// prepareType prepares typeName for export/introspection with DB.Keys,
// DB.Record, DB.Records. It is different in that it does not require a
// reflect.Type to parse into. It parses to a map, e.g. for export to JSON. The
// returned typeVersion has no structFields set in its fields.
// Returns all stored type versions, the latest version, the records bucket
// and the field names of the latest version.
func (db *DB) prepareType(tx *Tx, typeName string) (map[uint32]*typeVersion, *typeVersion, *bolt.Bucket, []string, error) {
	rb, err := tx.recordsBucket(typeName, 0.5)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	tb, err := tx.bucket(bucketKey{typeName, "types"})
	if err != nil {
		return nil, nil, nil, nil, err
	}
	// Collect all stored type versions; the highest version number is the current one.
	versions := map[uint32]*typeVersion{}
	var tv *typeVersion
	err = tb.ForEach(func(bk, bv []byte) error {
		// note: we do not track stats for types operations.
		ntv, err := parseSchema(bk, bv)
		if err != nil {
			return err
		}
		versions[ntv.Version] = ntv
		if tv == nil || ntv.Version > tv.Version {
			tv = ntv
		}
		return nil
	})
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if tv == nil {
		return nil, nil, nil, nil, fmt.Errorf("%w: no type versions", ErrStore)
	}
	fields := make([]string, len(tv.Fields))
	for i, f := range tv.Fields {
		fields[i] = f.Name
	}
	return versions, tv, rb, fields, nil
}
// Keys returns the parsed primary keys for the type "typeName". The type does
// not have to be registered with Open or Register. For use with Record(s) to
// export data.
func (db *DB) Keys(typeName string, fn func(pk any) error) error {
	return db.Read(func(tx *Tx) error {
		_, tv, rb, _, err := db.prepareType(tx, typeName)
		if err != nil {
			return err
		}

		// todo: do not pass nil parser?
		// Reusable value of the primary key type; each bucket key is parsed into it.
		v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(nil))).Elem()
		return rb.ForEach(func(bk, bv []byte) error {
			tx.stats.Records.Cursor++

			if err := parsePK(v, bk); err != nil {
				return err
			}
			return fn(v.Interface())
		})
	})
}
// Record returns the record with primary "key" for "typeName" parsed as map.
// "Fields" is set to the fields of the type. The type does not have to be
// registered with Open or Register. Record parses the data without the Go
// type present. BinaryMarshal fields are returned as bytes.
func (db *DB) Record(typeName, key string, fields *[]string) (map[string]any, error) {
	var r map[string]any
	err := db.Read(func(tx *Tx) error {
		versions, tv, rb, xfields, err := db.prepareType(tx, typeName)
		if err != nil {
			return err
		}
		*fields = xfields

		// Parse the textual key into a Go value matching the primary key's kind.
		var kv any
		switch tv.Fields[0].Type.Kind {
		case kindBool:
			switch key {
			case "true":
				kv = true
			case "false":
				kv = false
			default:
				err = fmt.Errorf("%w: invalid bool %q", ErrParam, key)
			}
		case kindInt8:
			kv, err = strconv.ParseInt(key, 10, 8)
		case kindInt16:
			kv, err = strconv.ParseInt(key, 10, 16)
		case kindInt32:
			kv, err = strconv.ParseInt(key, 10, 32)
		case kindInt:
			// int is stored as int32, hence the 32-bit range.
			kv, err = strconv.ParseInt(key, 10, 32)
		case kindInt64:
			kv, err = strconv.ParseInt(key, 10, 64)
		case kindUint8:
			kv, err = strconv.ParseUint(key, 10, 8)
		case kindUint16:
			kv, err = strconv.ParseUint(key, 10, 16)
		case kindUint32:
			kv, err = strconv.ParseUint(key, 10, 32)
		case kindUint:
			// uint is stored as uint32, hence the 32-bit range.
			kv, err = strconv.ParseUint(key, 10, 32)
		case kindUint64:
			kv, err = strconv.ParseUint(key, 10, 64)
		case kindString:
			kv = key
		case kindBytes:
			kv = []byte(key) // todo: or decode from base64?
		default:
			return fmt.Errorf("internal error: unknown primary key kind %v", tv.Fields[0].Type.Kind)
		}
		if err != nil {
			return err
		}
		pkv := reflect.ValueOf(kv)
		kind, err := typeKind(pkv.Type())
		if err != nil {
			return err
		}
		if kind != tv.Fields[0].Type.Kind {
			// Convert from various int types above to required type. The ParseInt/ParseUint
			// calls already validated that the values fit.
			pkt := reflect.TypeOf(tv.Fields[0].Type.zero(nil))
			pkv = pkv.Convert(pkt)
		}
		k, err := packPK(pkv)
		if err != nil {
			return err
		}

		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		record, err := parseMap(versions, k, bv)
		if err != nil {
			return err
		}
		r = record
		return nil
	})
	return r, err
}
// Records calls "fn" for each record of "typeName". Records sets "fields" to
// the fields of the type. The type does not have to be registered with Open or
// Register. Record parses the data without the Go type present. BinaryMarshal
// fields are returned as bytes.
func (db *DB) Records(typeName string, fields *[]string, fn func(map[string]any) error) error {
	return db.Read(func(tx *Tx) error {
		versions, _, rb, names, err := db.prepareType(tx, typeName)
		if err != nil {
			return err
		}
		*fields = names

		// Parse each stored record into a map and hand it to the callback.
		each := func(bk, bv []byte) error {
			tx.stats.Records.Cursor++

			m, err := parseMap(versions, bk, bv)
			if err != nil {
				return err
			}
			return fn(m)
		}
		return rb.ForEach(each)
	})
}
// parseMap parses a record into a map with the right typeVersion from versions.
func parseMap(versions map[uint32]*typeVersion, bk, bv []byte) (record map[string]any, rerr error) {
	p := &parser{buf: bv, orig: bv}
	var version uint32

	// The parser panics with parseErr on malformed data; turn that back into
	// a regular error, with context about where parsing stopped.
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		if err, ok := x.(parseErr); ok {
			rerr = fmt.Errorf("%w (version %d, buf %x orig %x)", err.err, version, p.buf, p.orig)
			return
		}
		panic(x)
	}()

	// A record starts with a uvarint type version that selects the schema to
	// parse the rest of the data with.
	version = uint32(p.Uvarint())
	tv := versions[version]
	if tv == nil {
		return nil, fmt.Errorf("%w: unknown type version %d", ErrStore, version)
	}
	r := map[string]any{}

	// The primary key is the bucket key bk, not part of the value bytes.
	v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(p))).Elem()
	err := parsePK(v, bk)
	if err != nil {
		return nil, err
	}
	r[tv.Fields[0].Name] = v.Interface()

	// todo: Should we be looking at the most recent tv, and hiding fields
	// that have been removed in a later typeVersion? Like we do for real
	// parsing into reflect value?

	// Remaining fields are preceded by a fieldmap: one nonzero-bit per field.
	fm := p.Fieldmap(len(tv.Fields) - 1)
	for i, f := range tv.Fields[1:] {
		if fm.Nonzero(i) {
			r[f.Name] = f.Type.parseValue(p)
		} else {
			r[f.Name] = f.Type.zero(p)
		}
	}
	if len(p.buf) != 0 {
		return nil, fmt.Errorf("%w: leftover data after parsing", ErrStore)
	}
	return r, nil
}
// parseValue parses a nonzero value of field type ft from p, returning it as
// a generic Go value: map[string]any for structs and maps, []any for slices,
// and []byte for BinaryMarshal fields (the Go type is not available).
// Malformed data aborts the parse via p.Errorf (panics with parseErr).
func (ft fieldType) parseValue(p *parser) any {
	switch ft.Kind {
	case kindBytes:
		return p.TakeBytes(false)
	case kindBinaryMarshal:
		// We don't have the type available, so we just return the binary data.
		return p.TakeBytes(false)
	case kindBool:
		// Presence in the fieldmap means true; no bytes are stored.
		return true
	case kindInt8:
		return int8(p.Varint())
	case kindInt16:
		return int16(p.Varint())
	case kindInt32:
		return int32(p.Varint())
	case kindInt:
		i := p.Varint()
		if i < math.MinInt32 || i > math.MaxInt32 {
			p.Errorf("%w: int %d does not fit in int32", ErrStore, i)
		}
		return int(i)
	case kindInt64:
		return p.Varint()
	case kindUint8:
		return uint8(p.Uvarint())
	case kindUint16:
		return uint16(p.Uvarint())
	case kindUint32:
		return uint32(p.Uvarint())
	case kindUint:
		i := p.Uvarint()
		if i > math.MaxUint32 {
			p.Errorf("%w: uint %d does not fit in uint32", ErrStore, i)
		}
		return uint(i)
	case kindUint64:
		return p.Uvarint()
	case kindFloat32:
		return math.Float32frombits(uint32(p.Uvarint()))
	case kindFloat64:
		return math.Float64frombits(p.Uvarint())
	case kindString:
		return string(p.TakeBytes(false))
	case kindTime:
		var t time.Time
		err := t.UnmarshalBinary(p.TakeBytes(false))
		if err != nil {
			p.Errorf("%w: parsing time: %v", ErrStore, err)
		}
		return t
	case kindSlice:
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		var l []any
		for i := 0; i < n; i++ {
			if fm.Nonzero(i) {
				l = append(l, ft.List.parseValue(p))
			} else {
				// Always add the zero element, or we would change the
				// number of elements in the list.
				l = append(l, ft.List.zero(p))
			}
		}
		return l
	case kindMap:
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		m := map[string]any{}
		for i := 0; i < n; i++ {
			// Converting to string can be ugly, but the best we can do.
			k := fmt.Sprintf("%v", ft.MapKey.parseValue(p))
			if _, ok := m[k]; ok {
				// Fail the parse. Previously this returned the error value
				// itself as the parsed value (the function returns any),
				// silently corrupting the result instead of reporting it.
				p.Errorf("%w: duplicate key %q in map", ErrStore, k)
			}
			var v any
			if fm.Nonzero(i) {
				v = ft.MapValue.parseValue(p)
			} else {
				v = ft.MapValue.zero(p)
			}
			m[k] = v
		}
		return m
	case kindStruct:
		fm := p.Fieldmap(len(ft.Fields))
		m := map[string]any{}
		for i, f := range ft.Fields {
			if fm.Nonzero(i) {
				m[f.Name] = f.Type.parseValue(p)
			} else {
				m[f.Name] = f.Type.zero(p)
			}
		}
		return m
	}
	p.Errorf("internal error: unhandled field type %v", ft.Kind)
	panic("cannot happen")
}
// zerovalues maps each field kind to the generic Go value used when a field
// is marked zero (absent) in the fieldmap during schema-less parsing.
var zerovalues = map[kind]any{
	kindBytes:         []byte(nil),
	kindBinaryMarshal: []byte(nil), // We don't have the actual type available, so we just return binary data.
	kindBool:          false,
	kindInt8:          int8(0),
	kindInt16:         int16(0),
	kindInt32:         int32(0),
	kindInt:           int(0),
	kindInt64:         int64(0),
	kindUint8:         uint8(0),
	kindUint16:        uint16(0),
	kindUint32:        uint32(0),
	kindUint:          uint(0),
	kindUint64:        uint64(0),
	kindFloat32:       float32(0),
	kindFloat64:       float64(0),
	kindString:        "",
	kindTime:          zerotime,
	kindSlice:         []any(nil),
	kindMap:           map[string]any(nil),
	kindStruct:        map[string]any(nil),
}
// zero returns the generic zero value for this field type, used for fields
// marked absent in the fieldmap. Unknown kinds abort via p.Errorf.
func (ft fieldType) zero(p *parser) any {
	if v, ok := zerovalues[ft.Kind]; ok {
		return v
	}
	p.Errorf("internal error: unhandled zero value for field type %v", ft.Kind)
	return nil // Unreachable, Errorf panics.
}

78
vendor/github.com/mjl-/bstore/format.md generated vendored Normal file
View File

@ -0,0 +1,78 @@
# Types
Each Go type is stored in its own bucket, after its name. Only subbuckets are
created directly below a type bucket, no key/values. Two subbuckets are always
created: "records" for the data, "types" for the type definitions. Each index
is stored in a subbucket named "index." followed by the name. Unique and
non-unique indices use the same encoding.
# Type versions
Type definitions are stored in the "types" subbucket. The key is a 4 byte
uint32, a version as referenced from a data record. The value is a JSON-encoded
representation of the typeVersion struct.
When a new Go type or changed Go type is registered with a database, a new type
version is added to the "types" subbucket. Data is always inserted/updated with
the most recent type version. But the database may still hold data records
referencing older type versions. Bstore decodes a packed data record with the
referenced type version. For storage efficiency: the type version is reused for
many stored records, a self-describing format (like JSON) would duplicate the
field names in each stored record.
# Record storage
Primary keys of types are used as BoltDB keys and can be of bool, integer
types, strings or byte slices. Floats, time, struct, slice, map, binarymarshal
cannot be stored as primary key. Bools are stored as a single byte 0 or 1.
Integers are stored in their fixed width encoding (eg 4 bytes for 32 bit int).
Signed integers are stored so the fixed-width byte value is ordered for all
signed values, i.e. math.MinInt32 is stored as 4 bytes bigendian with value 0.
For strings and byte slices, only their bytes are stored.
The value stored with a BoltDB key starts with a uvarint "version" of the type.
This refers to a version in the "types" bucket. The primary key is not encoded
again in the data record itself. The remaining fields are space-efficiently
encoded.
After the uvarint version follow as many bytes to fit a bitmap for the direct
struct fields in the type description. Each bit indicates if the value is
nonzero and present in the value that follows. Only non-zero values take up
more space than the single bit and are stored consecutively after the fieldmap:
- Pointers are stored as their non-pointer value. If the pointer is nil, it
is zero in the fieldmap.
- If the underlying type is a signed int, unsigned int or float, then
varint/uvarint encoding from encoding/binary is used.
- If the underlying type is a string or []byte, uvarint count followed by the
bytes.
- If the underlying type is a bool, the value is always true and no
additional data is present to represent the value. False is represented by
the zero value marked in the fieldmap.
- Slices use a uvarint for the number of elements, followed by a bitmap for
nonzero values, followed by the encoded nonzero elements.
- Maps use a uvarint for the number of key/value pairs, followed by a
fieldmap for the values (the keys are always present), followed by each
pair: key (always present), value (only if nonzero); key, value; etc.
- If a type is an encoding.BinaryUnmarshaler and encoding.BinaryMarshaler,
then its bytes are stored prefixed with its uvarint length.
- If the type is a struct, its fields are encoded with a field map followed
by its nonzero field values.
- Other types cannot be represented currently.
In a new type version, the type of a field can be changed as long as existing
records can be decoded into the new Go type. E.g. you can change an int32 into
an int64. You can only change an int64 into an int32 if all values you attempt
to read are small enough to fit in an int32. You cannot change between signed
and unsigned integer, or between string and []byte.
# Index storage
Indexes are stored in subbuckets, named starting with "index." followed by the
index name. Keys are a self-delimiting encodings of the fields that make up the
key, followed by the primary key for the "records" bucket. Values are always
empty in index buckets. For bool and integer types, the same fixed-width
encoding as for primary keys in the "records" subbucket is used. Strings are
encoded by their bytes (no \0 allowed) followed by a delimiting \0. Unlike
primary keys, an index can cover a field with type time.Time. Times are encoded
with 8 byte seconds followed by the remaining 4 bytes nanoseconds.

13
vendor/github.com/mjl-/bstore/gendoc.sh generated vendored Normal file
View File

@ -0,0 +1,13 @@
#!/bin/sh
# Generate cmd/bstore/doc.go: a Go package doc comment containing the usage
# output of the bstore command, so "go doc" lists the available subcommands.
(
cat <<EOF
/*
Command bstore provides commands for inspecting a bstore database.
Subcommands:
EOF
# Running without arguments prints usage on stderr; indent it and drop the
# trailing "exit status" line that "go run" adds.
go run cmd/bstore/bstore.go 2>&1 | sed 's/^/ /' | grep -v 'exit status'
echo '*/'
echo 'package main'
) >cmd/bstore/doc.go

282
vendor/github.com/mjl-/bstore/keys.go generated vendored Normal file
View File

@ -0,0 +1,282 @@
package bstore
import (
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
/*
The records buckets map a primary key to the record data. The primary key is of
a form that we can scan/range over. So fixed width for integers. For strings and
bytes they are just their byte representation. We do not store the PK in the
record data. This means we cannot store a time.Time as primary key, because we
cannot have the timezone encoded for comparison reasons.
Index keys are similar to PK's. Unique and non-unique indices are encoded the
same. The stored values are always empty, the key consists of the field values
the index was created for, followed by the PK. The encoding of a field is nearly
the same as the encoding of that type as a primary key. The differences: strings
end with a \0 to make them self-delimiting; byte slices are not allowed because
they are not self-delimiting; time.Time is allowed because the time is available
in full (with timezone) in the record data.
*/
// packPK returns the PK bytes representation for the PK value rv.
// Integers are packed fixed-width big-endian; signed values are offset (by
// wrapping addition of the minimum) so the byte encoding sorts in the same
// order as the numeric values.
func packPK(rv reflect.Value) ([]byte, error) {
	kv := rv.Interface()
	var buf []byte
	switch k := kv.(type) {
	case string:
		buf = []byte(k)
	case []byte:
		buf = k
	case bool:
		var b byte
		if k {
			b = 1
		}
		buf = []byte{b}
	case int8:
		// Wraps: MinInt8 becomes 0x00, MaxInt8 becomes 0xff.
		buf = []byte{byte(uint8(k + math.MinInt8))}
	case int16:
		buf = binary.BigEndian.AppendUint16(nil, uint16(k+math.MinInt16))
	case int32:
		buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
	case int:
		// int is stored in 4 bytes, so must fit in an int32.
		if k < math.MinInt32 || k > math.MaxInt32 {
			return nil, fmt.Errorf("%w: int %d does not fit in int32", ErrParam, k)
		}
		buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
	case int64:
		buf = binary.BigEndian.AppendUint64(nil, uint64(k+math.MinInt64))
	case uint8:
		buf = []byte{k}
	case uint16:
		buf = binary.BigEndian.AppendUint16(nil, k)
	case uint32:
		buf = binary.BigEndian.AppendUint32(nil, k)
	case uint:
		// uint is stored in 4 bytes, so must fit in a uint32.
		if k > math.MaxUint32 {
			return nil, fmt.Errorf("%w: uint %d does not fit in uint32", ErrParam, k)
		}
		buf = binary.BigEndian.AppendUint32(nil, uint32(k))
	case uint64:
		buf = binary.BigEndian.AppendUint64(nil, k)
	default:
		return nil, fmt.Errorf("%w: unsupported primary key type %T", ErrType, kv)
	}
	return buf, nil
}
// parsePK parses primary key bk into rv, the inverse of packPK.
func parsePK(rv reflect.Value, bk []byte) error {
	k, err := typeKind(rv.Type())
	if err != nil {
		return err
	}

	// Strings and byte slices are the raw key bytes. They must be copied:
	// BoltDB-provided byte slices are only valid within the transaction.
	switch k {
	case kindBytes:
		buf := make([]byte, len(bk))
		copy(buf, bk)
		rv.SetBytes(buf)
		return nil
	case kindString:
		rv.SetString(string(bk))
		return nil
	}

	// Remaining kinds are fixed width; verify the exact expected size.
	var need int
	switch k {
	case kindBool, kindInt8, kindUint8:
		need = 1
	case kindInt16, kindUint16:
		need = 2
	case kindInt32, kindUint32, kindInt, kindUint:
		need = 4
	case kindInt64, kindUint64:
		need = 8
	}
	if len(bk) != need {
		return fmt.Errorf("%w: got %d bytes for PK, need %d", ErrStore, len(bk), need)
	}

	switch k {
	case kindBool:
		rv.SetBool(bk[0] != 0)
	case kindInt8:
		// Subtracting the minimum (wrapping) undoes the ordering offset from packPK.
		rv.SetInt(int64(int8(bk[0]) - math.MinInt8))
	case kindInt16:
		rv.SetInt(int64(int16(binary.BigEndian.Uint16(bk)) - math.MinInt16))
	case kindInt32, kindInt:
		rv.SetInt(int64(int32(binary.BigEndian.Uint32(bk)) - math.MinInt32))
	case kindInt64:
		rv.SetInt(int64(int64(binary.BigEndian.Uint64(bk)) - math.MinInt64))
	case kindUint8:
		rv.SetUint(uint64(bk[0]))
	case kindUint16:
		rv.SetUint(uint64(binary.BigEndian.Uint16(bk)))
	case kindUint32, kindUint:
		rv.SetUint(uint64(binary.BigEndian.Uint32(bk)))
	case kindUint64:
		rv.SetUint(uint64(binary.BigEndian.Uint64(bk)))
	default:
		// note: we cannot have kindTime as primary key at the moment.
		return fmt.Errorf("%w: unsupported primary key type %v", ErrType, rv.Type())
	}
	return nil
}
// parseKey parses the PK (last element) of an index key.
// If all is set, also gathers the values before and returns them in the second
// parameter.
func (idx *index) parseKey(buf []byte, all bool) ([]byte, [][]byte, error) {
	var err error
	var keys [][]byte
	// take consumes the next n bytes of buf, recording them in keys when all
	// is set. On short input it records err for the caller to check.
	take := func(n int) {
		if len(buf) < n {
			err = fmt.Errorf("%w: not enough bytes in index key", ErrStore)
			return
		}
		if all {
			keys = append(keys, buf[:n])
		}
		buf = buf[n:]
	}

	// Consume the indexed field values that precede the PK. All are fixed
	// width except strings, which are \0-terminated (see packIndexKeys).
fields:
	for _, f := range idx.Fields {
		if err != nil {
			break
		}
		switch f.Type.Kind {
		case kindString:
			for i, b := range buf {
				if b == 0 {
					if all {
						keys = append(keys, buf[:i])
					}
					buf = buf[i+1:]
					continue fields
				}
			}
			err = fmt.Errorf("%w: bad string without 0 in index key", ErrStore)
		case kindBool:
			take(1)
		case kindInt8, kindUint8:
			take(1)
		case kindInt16, kindUint16:
			take(2)
		case kindInt32, kindUint32, kindInt, kindUint:
			take(4)
		case kindInt64, kindUint64:
			take(8)
		case kindTime:
			// 8 bytes seconds followed by 4 bytes nanoseconds.
			take(8 + 4)
		}
	}
	if err != nil {
		return nil, nil, err
	}

	// The remainder is the PK. String/bytes PKs have no case below: the whole
	// rest of the buffer is the key. Fixed-width PKs are consumed to verify
	// the size and detect leftover bytes.
	pk := buf
	switch idx.tv.Fields[0].Type.Kind {
	case kindBool:
		take(1)
	case kindInt8, kindUint8:
		take(1)
	case kindInt16, kindUint16:
		take(2)
	case kindInt32, kindInt, kindUint32, kindUint:
		take(4)
	case kindInt64, kindUint64:
		take(8)
	}
	// Bug fix: the error from take above was previously ignored, so a
	// truncated fixed-width PK was silently returned as-is.
	if err != nil {
		return nil, nil, err
	}
	if len(pk) != len(buf) && len(buf) != 0 {
		return nil, nil, fmt.Errorf("%w: leftover bytes in index key (%x)", ErrStore, buf)
	}
	if all {
		// Drop the PK bytes that take appended; callers want only the field values.
		// NOTE(review): a string/bytes PK appends nothing above, in which case
		// this would drop the last field value instead — confirm callers never
		// use all with such a PK.
		return pk, keys[:len(keys)-1], nil
	}
	return pk, nil, nil
}
// packKey returns the index key for record value rv: first the prefix without
// the pk, then the full key including the pk.
func (idx *index) packKey(rv reflect.Value, pk []byte) ([]byte, []byte, error) {
	values := make([]reflect.Value, 0, len(idx.Fields))
	for _, f := range idx.Fields {
		values = append(values, rv.FieldByIndex(f.structField.Index))
	}
	return packIndexKeys(values, pk)
}
// packIndexKeys packs values from l, followed by the pk.
// It returns the key prefix (without pk), and full key with pk.
// Encoding matches packPK, except strings get a trailing \0 to make them
// self-delimiting, and time.Time is allowed (8 bytes seconds, 4 bytes nanos).
func packIndexKeys(l []reflect.Value, pk []byte) ([]byte, []byte, error) {
	var prek, ik []byte
	for _, frv := range l {
		k, err := typeKind(frv.Type())
		if err != nil {
			return nil, nil, err
		}
		var buf []byte
		switch k {
		case kindBool:
			buf = []byte{0}
			if frv.Bool() {
				buf[0] = 1
			}
		case kindInt8:
			// Signed ints are offset (wrapping) so byte order matches numeric order.
			buf = []byte{byte(int8(frv.Int()) + math.MinInt8)}
		case kindInt16:
			buf = binary.BigEndian.AppendUint16(nil, uint16(int16(frv.Int())+math.MinInt16))
		case kindInt32:
			buf = binary.BigEndian.AppendUint32(nil, uint32(int32(frv.Int())+math.MinInt32))
		case kindInt:
			i := frv.Int()
			if i < math.MinInt32 || i > math.MaxInt32 {
				return nil, nil, fmt.Errorf("%w: int value %d does not fit in int32", ErrParam, i)
			}
			buf = binary.BigEndian.AppendUint32(nil, uint32(int32(i)+math.MinInt32))
		case kindInt64:
			buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Int()+math.MinInt64))
		case kindUint8:
			buf = []byte{byte(frv.Uint())}
		case kindUint16:
			buf = binary.BigEndian.AppendUint16(nil, uint16(frv.Uint()))
		case kindUint32:
			buf = binary.BigEndian.AppendUint32(nil, uint32(frv.Uint()))
		case kindUint:
			i := frv.Uint()
			if i > math.MaxUint32 {
				return nil, nil, fmt.Errorf("%w: uint value %d does not fit in uint32", ErrParam, i)
			}
			buf = binary.BigEndian.AppendUint32(nil, uint32(i))
		case kindUint64:
			buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Uint()))
		case kindString:
			// Strings are terminated with \0, so cannot contain \0 themselves.
			buf = []byte(frv.String())
			for _, c := range buf {
				if c == 0 {
					return nil, nil, fmt.Errorf("%w: string used as index key cannot have \\0", ErrParam)
				}
			}
			buf = append(buf, 0)
		case kindTime:
			tm := frv.Interface().(time.Time)
			buf = binary.BigEndian.AppendUint64(nil, uint64(tm.Unix()+math.MinInt64))
			buf = binary.BigEndian.AppendUint32(buf, uint32(tm.Nanosecond()))
		default:
			return nil, nil, fmt.Errorf("internal error: bad type %v for index", frv.Type()) // todo: should be caught when making index type
		}
		ik = append(ik, buf...)
	}
	// prek aliases ik up to (but not including) the pk.
	n := len(ik)
	ik = append(ik, pk...)
	prek = ik[:n]
	return prek, ik, nil
}

218
vendor/github.com/mjl-/bstore/nonzero.go generated vendored Normal file
View File

@ -0,0 +1,218 @@
package bstore
import (
"fmt"
"reflect"
)
// isZero reports whether v is the zero value for the fields that bstore
// stores. reflect's IsZero cannot be used on structs directly because it also
// inspects private fields.
func (ft fieldType) isZero(v reflect.Value) bool {
	if !v.IsValid() {
		return true
	}
	if ft.Ptr {
		return v.IsNil()
	}
	if ft.Kind == kindStruct {
		// A struct is zero when every stored field is zero.
		for _, f := range ft.Fields {
			if !f.Type.isZero(v.FieldByIndex(f.structField.Index)) {
				return false
			}
		}
		return true
	}
	// Standard IsZero suffices for all other kinds, including kindBinaryMarshal.
	return v.IsZero()
}
// checkNonzero compares ofields and nfields (from previous type schema vs newly
// created type schema) for nonzero struct tag. If an existing field got a
// nonzero struct tag added, we verify that there are indeed no nonzero values
// in the database. If there are, we return ErrZero.
func (tx *Tx) checkNonzero(st storeType, tv *typeVersion, ofields, nfields []field) error {
	// First we gather paths that we need to check, so we can later simply
	// execute those steps on all data we need to read.
	paths := &follows{}
next:
	for _, f := range nfields {
		for _, of := range ofields {
			if f.Name == of.Name {
				// Field exists in both schemas: check whether nonzero was newly added.
				err := f.checkNonzeroGather(&of, paths)
				if err != nil {
					return err
				}
				continue next
			}
		}
		// Field is new in this schema; no old counterpart to compare against.
		if err := f.checkNonzeroGather(nil, paths); err != nil {
			return err
		}
	}

	if len(paths.paths) == 0 {
		// Common case, not reading all data.
		return nil
	}

	// Finally actually do the checks.
	// todo: if there are only top-level fields to check, and we have an index, we can use the index check this without reading all data.
	return tx.checkNonzeroPaths(st, tv, paths.paths)
}
// follow is one step on a path towards a field with a (newly added) nonzero
// constraint: descend into a map key, a map value, or a struct field.
type follow struct {
	mapKey, mapValue bool
	field            field
}

// follows tracks the current path while walking a type, and collects each
// complete path that needs a nonzero check against stored data.
type follows struct {
	current []follow
	paths   [][]follow
}

// push appends a step to the current path.
func (f *follows) push(ff follow) {
	f.current = append(f.current, ff)
}

// pop removes the last step of the current path.
func (f *follows) pop() {
	f.current = f.current[:len(f.current)-1]
}

// add records a copy of the current path as a path to check.
func (f *follows) add() {
	f.paths = append(f.paths, append([]follow{}, f.current...))
}
// checkNonzeroGather pushes this field onto the current path, records the path
// when the nonzero constraint was newly added compared to old field of, and
// recurses into the field type when an old counterpart exists.
func (f field) checkNonzeroGather(of *field, paths *follows) error {
	paths.push(follow{field: f})
	defer paths.pop()

	// A check is only needed if the field is nonzero now but was not before.
	newlyNonzero := f.Nonzero && (of == nil || !of.Nonzero)
	if newlyNonzero {
		paths.add()
	}
	if of == nil {
		return nil
	}
	return f.Type.checkNonzeroGather(of.Type, paths)
}
// checkNonzeroGather recurses into composite kinds (map, slice, struct),
// gathering paths to fields that gained a nonzero constraint compared to the
// old field type oft.
func (ft fieldType) checkNonzeroGather(oft fieldType, paths *follows) error {
	switch ft.Kind {
	case kindMap:
		// Check both the key type and the value type.
		paths.push(follow{mapKey: true})
		if err := ft.MapKey.checkNonzeroGather(*oft.MapKey, paths); err != nil {
			return err
		}
		paths.pop()

		paths.push(follow{mapValue: true})
		if err := ft.MapValue.checkNonzeroGather(*oft.MapValue, paths); err != nil {
			return err
		}
		paths.pop()
	case kindSlice:
		err := ft.List.checkNonzeroGather(*oft.List, paths)
		if err != nil {
			return err
		}
	case kindStruct:
	next:
		for _, ff := range ft.Fields {
			for _, off := range oft.Fields {
				if ff.Name == off.Name {
					err := ff.checkNonzeroGather(&off, paths)
					if err != nil {
						return err
					}
					continue next
				}
			}
			// Field not present in the old type version.
			err := ff.checkNonzeroGather(nil, paths)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// checkNonzeroPaths reads through all records of a type, and checks that the
// fields indicated by paths are nonzero. If not, ErrZero is returned.
func (tx *Tx) checkNonzeroPaths(st storeType, tv *typeVersion, paths [][]follow) error {
	rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
	if err != nil {
		return err
	}
	return rb.ForEach(func(bk, bv []byte) error {
		tx.stats.Records.Cursor++

		// Parse each stored record into a value of the current Go type, then
		// walk every gathered path and verify nonzeroness at its end.
		rv, err := st.parseNew(bk, bv)
		if err != nil {
			return err
		}
		// todo optimization: instead of parsing the full record, use the fieldmap to see if the value is nonzero.
		for _, path := range paths {
			frv := rv.FieldByIndex(path[0].field.structField.Index)
			if err := path[0].field.checkNonzero(frv, path[1:]); err != nil {
				return err
			}
		}
		return nil
	})
}
// checkNonzero verifies the value at the end of path: with an empty path this
// field itself must be nonzero, otherwise descend into the field type.
func (f field) checkNonzero(rv reflect.Value, path []follow) error {
	if len(path) > 0 {
		return f.Type.checkNonzero(rv, path)
	}
	if !f.Nonzero {
		return fmt.Errorf("internal error: checkNonzero: expected field to have Nonzero set")
	}
	if f.Type.isZero(rv) {
		return fmt.Errorf("%w: field %q", ErrZero, f.Name)
	}
	return nil
}
// checkNonzero follows one step of path through the value rv, fanning out over
// all elements of maps and slices, until a field step verifies nonzeroness.
func (ft fieldType) checkNonzero(rv reflect.Value, path []follow) error {
	switch ft.Kind {
	case kindMap:
		follow := path[0]
		path = path[1:]
		key := follow.mapKey
		if !key && !follow.mapValue {
			return fmt.Errorf("internal error: following map, expected mapKey or mapValue, got %#v", follow)
		}
		// Check all keys or all values of the map.
		iter := rv.MapRange()
		for iter.Next() {
			var err error
			if key {
				err = ft.MapKey.checkNonzero(iter.Key(), path)
			} else {
				err = ft.MapValue.checkNonzero(iter.Value(), path)
			}
			if err != nil {
				return err
			}
		}
	case kindSlice:
		// Check every element.
		n := rv.Len()
		for i := 0; i < n; i++ {
			if err := ft.List.checkNonzero(rv.Index(i), path); err != nil {
				return err
			}
		}
	case kindStruct:
		follow := path[0]
		path = path[1:]
		frv := rv.FieldByIndex(follow.field.structField.Index)
		if err := follow.field.checkNonzero(frv, path); err != nil {
			return err
		}
	default:
		return fmt.Errorf("internal error: checkNonzero with non-empty path, but kind %v", ft.Kind)
	}
	return nil
}

276
vendor/github.com/mjl-/bstore/pack.go generated vendored Normal file
View File

@ -0,0 +1,276 @@
package bstore
import (
"bytes"
"encoding"
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
// fieldmap represents a bitmap indicating which fields are actually stored and
// can be parsed. zero values for fields are not otherwise stored.
type fieldmap struct {
	max    int    // Required number of fields.
	buf    []byte // Bitmap, we write the next 0/1 at bit n.
	n      int    // Fields seen so far.
	offset int    // In final output, we write buf back after finish. Only relevant for packing.
	Errorf func(format string, args ...any)
}
// Field appends one bit to the fieldmap, set when the field is nonzero.
func (f *fieldmap) Field(nonzero bool) {
	if f.n >= f.max {
		f.Errorf("internal error: too many fields, max %d", f.max)
	}
	if nonzero {
		// Bits are filled from the most significant bit of each byte down.
		f.buf[f.n/8] |= 1 << (7 - f.n%8)
	}
	f.n++
}
// Nonzero reports whether field i is marked nonzero in the fieldmap.
func (f *fieldmap) Nonzero(i int) bool {
	return f.buf[i/8]&(1<<(7-i%8)) != 0
}
// packer writes the packed representation of a record. Space for fieldmaps is
// reserved up front and their final bits are copied back in during Finish.
type packer struct {
	b         *bytes.Buffer
	offset    int
	fieldmaps []*fieldmap // Pending fieldmaps, not excluding fieldmap below.
	fieldmap  *fieldmap   // Currently active.
	popped    []*fieldmap // Completed fieldmaps, to be written back during finish.
}

// Errorf aborts packing by panicking with a packErr, recovered in pack.
func (p *packer) Errorf(format string, args ...any) {
	panic(packErr{fmt.Errorf(format, args...)})
}
// Push a new fieldmap on the stack for n fields. Space for the bitmap is
// reserved in the output now; the bits are written back during Finish.
func (p *packer) PushFieldmap(n int) {
	p.fieldmaps = append(p.fieldmaps, p.fieldmap)
	buf := make([]byte, (n+7)/8)
	p.fieldmap = &fieldmap{max: n, buf: buf, offset: p.offset, Errorf: p.Errorf}
	p.Write(buf) // Updates offset. Write errors cause panic.
}
// Pop a fieldmap from the stack. It is remembered in popped for writing the
// bytes during finish. All of its fields must have been added.
func (p *packer) PopFieldmap() {
	if p.fieldmap.n != p.fieldmap.max {
		p.Errorf("internal error: fieldmap n %d != max %d", p.fieldmap.n, p.fieldmap.max)
	}
	p.popped = append(p.popped, p.fieldmap)
	p.fieldmap = p.fieldmaps[len(p.fieldmaps)-1]
	p.fieldmaps = p.fieldmaps[:len(p.fieldmaps)-1]
}
// Finish writes back finished (popped) fieldmaps to the correct offset,
// returning the final bytes representation of this record.
func (p *packer) Finish() []byte {
	if p.fieldmap != nil {
		p.Errorf("internal error: leftover fieldmap during finish")
	}
	buf := p.b.Bytes()
	// Copy each completed bitmap over the space reserved by PushFieldmap.
	for _, f := range p.popped {
		copy(buf[f.offset:], f.buf)
	}
	return buf
}
// Field adds field with nonzeroness to the current fieldmap.
func (p *packer) Field(nonzero bool) {
	p.fieldmap.Field(nonzero)
}

// Write appends buf to the output, tracking the current offset so fieldmaps
// know where their reserved space lives. Errors cause a packErr panic.
func (p *packer) Write(buf []byte) (int, error) {
	n, err := p.b.Write(buf)
	if err != nil {
		p.Errorf("write: %w", err)
	}
	if n > 0 {
		p.offset += n
	}
	return n, err
}
// AddBytes writes a uvarint length followed by the bytes themselves.
func (p *packer) AddBytes(buf []byte) {
	p.Uvarint(uint64(len(buf)))
	p.Write(buf) // Write errors cause panic.
}

// Uvarint writes v in variable-width encoding from encoding/binary.
func (p *packer) Uvarint(v uint64) {
	buf := make([]byte, binary.MaxVarintLen64)
	o := binary.PutUvarint(buf, v)
	p.Write(buf[:o]) // Write errors cause panic.
}

// Varint writes v in signed variable-width encoding from encoding/binary.
func (p *packer) Varint(v int64) {
	buf := make([]byte, binary.MaxVarintLen64)
	o := binary.PutVarint(buf, v)
	p.Write(buf[:o]) // Write errors cause panic.
}
// packErr wraps an error for panic-based error propagation while packing;
// it is recovered in storeType.pack and returned as a regular error.
type packErr struct {
	err error
}
// pack rv (reflect.Struct), excluding the primary key field.
func (st storeType) pack(rv reflect.Value) (rbuf []byte, rerr error) {
	p := &packer{b: &bytes.Buffer{}}

	// Packing reports problems by panicking with packErr; convert that back
	// into a regular error return, repanic anything else.
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		perr, ok := x.(packErr)
		if ok {
			rerr = perr.err
			return
		}
		panic(x)
	}()
	st.Current.pack(p, rv)
	return p.Finish(), nil
}
// pack writes the record value rv (without primary key) using this type
// version's schema: uvarint version, fieldmap, then the nonzero field values.
func (tv typeVersion) pack(p *packer, rv reflect.Value) {
	// When parsing, the same typeVersion (type schema) is used to
	// interpret the bytes correctly.
	p.Uvarint(uint64(tv.Version))

	// First field is the primary key, stored as the bucket key, not here.
	p.PushFieldmap(len(tv.Fields) - 1)
	for _, f := range tv.Fields[1:] {
		nrv := rv.FieldByIndex(f.structField.Index)
		if f.Type.isZero(nrv) {
			if f.Nonzero {
				p.Errorf("%w: %q", ErrZero, f.Name)
			}
			p.Field(false)
			// Pretend to pack to get the nonzero checks.
			if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
				f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
			}
		} else {
			p.Field(true)
			f.Type.pack(p, nrv)
		}
	}
	p.PopFieldmap()
}
// pack the nonzero value rv.
func (ft fieldType) pack(p *packer, rv reflect.Value) {
	// Pointers are stored as their dereferenced value; nil pointers are zero
	// and never reach this function.
	if ft.Ptr {
		rv = rv.Elem()
	}
	switch ft.Kind {
	case kindBytes:
		p.AddBytes(rv.Bytes())
	case kindBinaryMarshal:
		v := rv
		buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
		if err != nil {
			p.Errorf("marshalbinary: %w", err)
		}
		p.AddBytes(buf)
	case kindBool:
		// No value needed. If false, it would be zero, handled above,
		// with a 0 in the fieldmap.
	case kindInt:
		v := rv.Int()
		if v < math.MinInt32 || v > math.MaxInt32 {
			p.Errorf("%w: int %d does not fit in int32", ErrParam, v)
		}
		p.Varint(v)
	case kindInt8, kindInt16, kindInt32, kindInt64:
		p.Varint(rv.Int())
	case kindUint8, kindUint16, kindUint32, kindUint64:
		p.Uvarint(rv.Uint())
	case kindUint:
		v := rv.Uint()
		if v > math.MaxUint32 {
			p.Errorf("%w: uint %d does not fit in uint32", ErrParam, v)
		}
		p.Uvarint(v)
	case kindFloat32:
		// Floats are stored as the uvarint of their IEEE 754 bit pattern.
		p.Uvarint(uint64(math.Float32bits(rv.Interface().(float32))))
	case kindFloat64:
		p.Uvarint(uint64(math.Float64bits(rv.Interface().(float64))))
	case kindString:
		p.AddBytes([]byte(rv.String()))
	case kindTime:
		buf, err := rv.Interface().(time.Time).MarshalBinary()
		if err != nil {
			p.Errorf("%w: pack time: %s", ErrParam, err)
		}
		p.AddBytes(buf)
	case kindSlice:
		// Element count, then a fieldmap of nonzero elements, then the
		// encoded nonzero elements.
		n := rv.Len()
		p.Uvarint(uint64(n))
		p.PushFieldmap(n)
		for i := 0; i < n; i++ {
			nrv := rv.Index(i)
			if ft.List.isZero(nrv) {
				p.Field(false)
				// Pretend to pack to get the nonzero checks of the element.
				if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
					ft.List.pack(&packer{b: &bytes.Buffer{}}, nrv)
				}
			} else {
				p.Field(true)
				ft.List.pack(p, nrv)
			}
		}
		p.PopFieldmap()
	case kindMap:
		// We write a fieldmap for zeroness of the values. The keys are unique, so there
		// can only be max 1 zero key. But there can be many zero values. struct{} is
		// common in Go, good to support that efficiently.
		n := rv.Len()
		p.Uvarint(uint64(n))
		p.PushFieldmap(n)
		iter := rv.MapRange()
		for iter.Next() {
			// Keys are always stored, even when zero.
			ft.MapKey.pack(p, iter.Key())
			v := iter.Value()
			if ft.MapValue.isZero(v) {
				p.Field(false)
				// Pretend to pack to get the nonzero checks of the key type.
				if v.IsValid() && (v.Kind() != reflect.Ptr || !v.IsNil()) {
					ft.MapValue.pack(&packer{b: &bytes.Buffer{}}, v)
				}
			} else {
				p.Field(true)
				ft.MapValue.pack(p, v)
			}
		}
		p.PopFieldmap()
	case kindStruct:
		// Same layout as a top-level record minus the version: fieldmap
		// followed by the nonzero field values.
		p.PushFieldmap(len(ft.Fields))
		for _, f := range ft.Fields {
			nrv := rv.FieldByIndex(f.structField.Index)
			if f.Type.isZero(nrv) {
				if f.Nonzero {
					p.Errorf("%w: %q", ErrZero, f.Name)
				}
				p.Field(false)
				// Pretend to pack to get the nonzero checks.
				if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
					f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
				}
			} else {
				p.Field(true)
				f.Type.pack(p, nrv)
			}
		}
		p.PopFieldmap()
	default:
		p.Errorf("internal error: unhandled field type") // should be prevented when registering type
	}
}

321
vendor/github.com/mjl-/bstore/parse.go generated vendored Normal file
View File

@ -0,0 +1,321 @@
package bstore
import (
"encoding"
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
// parser reads a packed record. buf is the remaining unread data, orig the
// full original input, kept for error context.
type parser struct {
	buf  []byte
	orig []byte
}

// Errorf aborts parsing by panicking with a parseErr, recovered in parse.
func (p *parser) Errorf(format string, args ...any) {
	panic(parseErr{fmt.Errorf(format, args...)})
}
// checkInt converts the parsed uvarint un to an int, failing the parse when
// it does not fit in an int32.
func (p *parser) checkInt(un uint64) int {
	if un > math.MaxInt32 {
		p.Errorf("%w: uvarint %d does not fit in int32", ErrStore, un)
	}
	return int(un)
}
// Fieldmap starts a new fieldmap for n fields, consuming the bitmap bytes
// ((n+7)/8 of them) from the input.
func (p *parser) Fieldmap(n int) *fieldmap {
	// log.Printf("parse fieldmap %d bits", n)
	nb := (n + 7) / 8
	buf := p.Take(nb)
	// Positional fields: max, buf, n, offset, Errorf.
	return &fieldmap{n, buf, 0, 0, p.Errorf}
}
// Take reads nb bytes, failing the parse when not enough input remains.
func (p *parser) Take(nb int) []byte {
	// log.Printf("take %d", nb)
	if len(p.buf) < nb {
		p.Errorf("%w: not enough bytes", ErrStore)
	}
	buf := p.buf[:nb]
	p.buf = p.buf[nb:]
	return buf
}
// TakeBytes reads a uvarint representing the size of the bytes, followed by
// that number of bytes.
// dup is needed if you need to hold on to the bytes. Values from BoltDB are
// only valid in the transaction, and not meant to be modified and are
// memory-mapped read-only.
func (p *parser) TakeBytes(dup bool) []byte {
	un := p.Uvarint()
	n := p.checkInt(un)
	buf := p.Take(n)
	if dup {
		// todo: check for a max size, beyond which we refuse to allocate?
		nbuf := make([]byte, len(buf))
		copy(nbuf, buf)
		buf = nbuf
	}
	return buf
}
// Uvarint reads an unsigned varint, failing the parse on truncation or overflow.
func (p *parser) Uvarint() uint64 {
	v, n := binary.Uvarint(p.buf)
	if n == 0 {
		p.Errorf("%w: uvarint: not enough bytes", ErrStore)
	}
	if n < 0 {
		p.Errorf("%w: uvarint overflow", ErrStore)
	}
	// log.Printf("take uvarint, %d bytes", n)
	p.buf = p.buf[n:]
	return v
}
// Varint reads a signed varint, failing the parse on truncation or overflow.
func (p *parser) Varint() int64 {
	v, n := binary.Varint(p.buf)
	if n == 0 {
		p.Errorf("%w: varint: not enough bytes", ErrStore)
	}
	if n < 0 {
		p.Errorf("%w: varint overflow", ErrStore)
	}
	// log.Printf("take varint, %d bytes", n)
	p.buf = p.buf[n:]
	return v
}
// parseErr wraps an error for panic-based error propagation while parsing;
// it is recovered in storeType.parse and returned as a regular error.
type parseErr struct {
	err error
}
// parse rv (reflect.Struct) from buf.
// Does not parse the primary key field; see parseFull for that.
func (st storeType) parse(rv reflect.Value, buf []byte) (rerr error) {
	p := &parser{buf: buf, orig: buf}
	var version uint32

	// Parsing panics with parseErr on malformed data; convert that back into
	// a regular error with context, repanic anything else.
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		perr, ok := x.(parseErr)
		if ok {
			rerr = fmt.Errorf("%w (version %d, buf %x, orig %x)", perr.err, version, p.buf, p.orig)
			return
		}
		panic(x)
	}()

	// The leading uvarint selects the type version (schema) the record was
	// packed with, which may be older than the current Go type.
	version = uint32(p.Uvarint())
	tv, ok := st.Versions[version]
	if !ok {
		return fmt.Errorf("%w: unknown type version %d", ErrStore, version)
	}
	tv.parse(p, rv)
	if len(p.buf) != 0 {
		return fmt.Errorf("%w: leftover data after parsing", ErrStore)
	}
	return nil
}
// parseNew allocates a fresh value of type st.Type and parses the record with
// key bk and value bv into it.
func (st storeType) parseNew(bk, bv []byte) (reflect.Value, error) {
	rv := reflect.New(st.Type).Elem()
	err := st.parseFull(rv, bk, bv)
	if err != nil {
		return reflect.Value{}, err
	}
	return rv, nil
}
// parseFull parses a full record, the primary key from bk and the remaining
// fields from bv, into value rv, which must be of type st.Type.
func (st storeType) parseFull(rv reflect.Value, bk, bv []byte) error {
	err := parsePK(rv.Field(0), bk)
	if err != nil {
		return err
	}
	return st.parse(rv, bv)
}
// parse reads the non-PK fields of a record into rv according to this type
// version: a fieldmap followed by the nonzero field values.
func (tv typeVersion) parse(p *parser, rv reflect.Value) {
	// First field is the primary key, stored as boltdb key only, not in
	// the value.
	fm := p.Fieldmap(len(tv.Fields) - 1)
	for i, f := range tv.Fields[1:] {
		if f.structField.Type == nil {
			// Do not parse this field in the current Go type, but
			// we must still skip over the bytes.
			if fm.Nonzero(i) {
				f.Type.skip(p)
			}
			continue
		}
		if fm.Nonzero(i) {
			f.Type.parse(p, rv.FieldByIndex(f.structField.Index))
		} else if f.Nonzero {
			// Consistency check. Should not happen, we enforce nonzeroness.
			p.Errorf("%w: unexpected nonzero value for %q", ErrStore, f.Name)
		} else {
			// Zero in the fieldmap: reset the Go field to its zero value.
			rv.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
		}
	}
}
// parse a nonzero fieldType from p into rv.
// Only called for fields whose fieldmap bit is set; zero values are never
// stored and are handled by the caller.
func (ft fieldType) parse(p *parser, rv reflect.Value) {
	// Because we allow schema changes from ptr to nonptr, rv can be a pointer or direct value regardless of ft.Ptr.
	if rv.Kind() == reflect.Ptr {
		nrv := reflect.New(rv.Type().Elem())
		rv.Set(nrv)
		rv = nrv.Elem()
	}
	switch ft.Kind {
	case kindBytes:
		rv.SetBytes(p.TakeBytes(true))
	case kindBinaryMarshal:
		buf := p.TakeBytes(false)
		t := rv.Type()
		if t.Kind() == reflect.Ptr {
			t = t.Elem()
		}
		v := reflect.New(t)
		err := v.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(buf)
		if err != nil {
			panic(parseErr{err})
		}
		if rv.Type().Kind() == reflect.Ptr {
			rv.Set(v)
		} else {
			rv.Set(v.Elem())
		}
	case kindBool:
		// Presence alone means true; false is a zero value and not stored.
		rv.SetBool(true)
	case kindInt:
		// Stored as varint, but must fit in int32 for portability.
		v := p.Varint()
		if v < math.MinInt32 || v > math.MaxInt32 {
			p.Errorf("%w: int %d does not fit in int32", ErrStore, v)
		}
		rv.SetInt(v)
	case kindInt8, kindInt16, kindInt32, kindInt64:
		rv.SetInt(p.Varint())
	case kindUint:
		// Stored as uvarint, but must fit in uint32 for portability.
		v := p.Uvarint()
		if v > math.MaxUint32 {
			p.Errorf("%w: uint %d does not fit in uint32", ErrStore, v)
		}
		rv.SetUint(v)
	case kindUint8, kindUint16, kindUint32, kindUint64:
		rv.SetUint(p.Uvarint())
	case kindFloat32:
		rv.SetFloat(float64(math.Float32frombits(uint32(p.Uvarint()))))
	case kindFloat64:
		rv.SetFloat(math.Float64frombits(p.Uvarint()))
	case kindString:
		rv.SetString(string(p.TakeBytes(false)))
	case kindTime:
		err := rv.Addr().Interface().(*time.Time).UnmarshalBinary(p.TakeBytes(false))
		if err != nil {
			p.Errorf("%w: parsing time: %s", ErrStore, err)
		}
	case kindSlice:
		// Element count, then a fieldmap marking which elements are nonzero.
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		slc := reflect.MakeSlice(rv.Type(), n, n)
		// Fixed: n is already an int (from checkInt), the int(n) conversion
		// was redundant and inconsistent with the kindMap case below.
		for i := 0; i < n; i++ {
			if fm.Nonzero(i) {
				ft.List.parse(p, slc.Index(i))
			}
		}
		rv.Set(slc)
	case kindMap:
		// Entry count, then a fieldmap marking which values are nonzero.
		// Keys are always stored; only nonzero values are.
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		mp := reflect.MakeMapWithSize(rv.Type(), n)
		for i := 0; i < n; i++ {
			mk := reflect.New(rv.Type().Key()).Elem()
			ft.MapKey.parse(p, mk)
			mv := reflect.New(rv.Type().Elem()).Elem()
			if fm.Nonzero(i) {
				ft.MapValue.parse(p, mv)
			}
			mp.SetMapIndex(mk, mv)
		}
		rv.Set(mp)
	case kindStruct:
		fm := p.Fieldmap(len(ft.Fields))
		strct := reflect.New(rv.Type()).Elem()
		for i, f := range ft.Fields {
			if f.structField.Type == nil {
				// Field no longer exists in the Go type; skip its bytes.
				f.Type.skip(p)
				continue
			}
			if fm.Nonzero(i) {
				f.Type.parse(p, strct.FieldByIndex(f.structField.Index))
			} else if f.Nonzero {
				// Consistency check, we enforce that nonzero is not stored if not allowed.
				p.Errorf("%w: %q", ErrZero, f.Name)
			} else {
				strct.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
			}
		}
		rv.Set(strct)
	default:
		p.Errorf("internal error: unhandled field type") // should be prevented when registering type
	}
}
// skip over the bytes for this fieldType. Needed when an older typeVersion has
// a field that the current reflect.Type does not (can) have.
// Must consume exactly the bytes that parse would, so the parser stays in sync.
func (ft fieldType) skip(p *parser) {
	switch ft.Kind {
	case kindBytes, kindBinaryMarshal, kindString:
		p.TakeBytes(false)
	case kindBool:
		// Booleans carry no bytes; presence in the fieldmap means true.
	case kindInt8, kindInt16, kindInt32, kindInt, kindInt64:
		p.Varint()
	case kindUint8, kindUint16, kindUint32, kindUint, kindUint64, kindFloat32, kindFloat64:
		p.Uvarint()
	case kindTime:
		p.TakeBytes(false)
	case kindSlice:
		// Count, fieldmap, then only nonzero elements are present.
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		for i := 0; i < n; i++ {
			if fm.Nonzero(i) {
				ft.List.skip(p)
			}
		}
	case kindMap:
		// Count, fieldmap; keys are always present, values only when nonzero.
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		for i := 0; i < n; i++ {
			ft.MapKey.skip(p)
			if fm.Nonzero(i) {
				ft.MapValue.skip(p)
			}
		}
	case kindStruct:
		fm := p.Fieldmap(len(ft.Fields))
		for i, f := range ft.Fields {
			if fm.Nonzero(i) {
				f.Type.skip(p)
			}
		}
	default:
		p.Errorf("internal error: unhandled field type") // should be prevented when registering type
	}
}

341
vendor/github.com/mjl-/bstore/plan.go generated vendored Normal file
View File

@ -0,0 +1,341 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
"sort"
)
// Plan represents a plan to execute a query, possibly using a simple/quick
// bucket "get" or cursor scan (forward/backward) on either the records or an
// index.
type plan[T any] struct {
	// The index for this plan. If nil, we are using pk's, in which case
	// "keys" below can be nil for a range scan with start/stop (possibly empty
	// for full scan), or non-nil for looking up specific keys.
	idx *index

	// Use full unique index to get specific values from keys. idx above can be
	// a unique index that we only use partially. In that case, this field is
	// false.
	unique bool

	// If not nil, used to fetch explicit keys when using pk or unique
	// index. Required non-nil for unique.
	keys [][]byte

	desc           bool   // Direction of the range scan.
	start          []byte // First key to scan. Filters below may still apply. If desc, this value is > than stop (if it is set). If nil, we begin ranging at the first or last (for desc) key.
	stop           []byte // Last key to scan. Can be nil independently of start.
	startInclusive bool   // If the start and stop values are inclusive or exclusive.
	stopInclusive  bool

	// Filter we need to apply after retrieving the record. If all
	// original filters from a query were handled by "keys" above, or by a
	// range scan, this field is empty.
	filters []filter[T]

	// Orders we need to apply after first retrieving all records. As with
	// filters, if a range scan takes care of an ordering from the query,
	// this field is empty.
	orders []order
}
// selectPlan selects the best plan for this query, in order of preference:
// explicit primary keys, a fully covered unique index, the best partial
// pk/index range scan, and finally a full table scan.
func (q *Query[T]) selectPlan() (*plan[T], error) {
	// Simple case first: List of known IDs. We can just fetch them from
	// the records bucket by their primary keys. This is common for a
	// "Get" query.
	if q.xfilterIDs != nil {
		orders := q.xorders
		keys := q.xfilterIDs.pks
		// If there is an ordering on the PK field, we do the ordering here.
		if len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name {
			asc := orders[0].asc
			sort.Slice(keys, func(i, j int) bool {
				cmp := bytes.Compare(keys[i], keys[j])
				return asc && cmp < 0 || !asc && cmp > 0
			})
			orders = orders[1:]
		}
		p := &plan[T]{
			keys:    keys,
			filters: q.xfilters,
			orders:  orders,
		}
		return p, nil
	}

	// Try using a fully matched unique index. We build a map with all
	// fields that have an equal or in filter. So we can easily look
	// through our unique indices and get a match. We only look at a single
	// filter per field. If there are multiple, we would use the last one.
	// That's okay, we'll filter records out when we execute the leftover
	// filters. Probably not common.
	// This is common for filterEqual and filterIn on
	// fields that have a unique index.
	equalsIn := map[string]*filter[T]{}
	for i := range q.xfilters {
		ff := &q.xfilters[i]
		switch f := (*ff).(type) {
		case filterEqual[T]:
			equalsIn[f.field.Name] = ff
		case filterIn[T]:
			equalsIn[f.field.Name] = ff
		}
	}
indices:
	for _, idx := range q.st.Current.Indices {
		// Direct fetches only for unique indices.
		if !idx.Unique {
			continue
		}
		for _, f := range idx.Fields {
			if _, ok := equalsIn[f.Name]; !ok {
				// At least one index field does not have a filter.
				continue indices
			}
		}
		// Calculate all keys that we need to retrieve from the index.
		// todo optimization: if there is a sort involving these fields, we could do the sorting before fetching data.
		// todo optimization: we can generate the keys on demand, will help when limit is in use: we are not generating all keys.
		var keys [][]byte
		var skipFilters []*filter[T] // Filters to remove from the full list because they are handled by quering the index.
		for i, f := range idx.Fields {
			var rvalues []reflect.Value
			ff := equalsIn[f.Name]
			skipFilters = append(skipFilters, ff)
			switch fi := (*ff).(type) {
			case filterEqual[T]:
				rvalues = []reflect.Value{fi.rvalue}
			case filterIn[T]:
				rvalues = fi.rvalues
			default:
				return nil, fmt.Errorf("internal error: bad filter %T", equalsIn[f.Name])
			}
			fekeys := make([][]byte, len(rvalues))
			for j, fv := range rvalues {
				key, _, err := packIndexKeys([]reflect.Value{fv}, nil)
				if err != nil {
					q.error(err)
					return nil, err
				}
				fekeys[j] = key
			}
			if i == 0 {
				keys = fekeys
				continue
			}
			// Multiply current keys with the new values (cartesian product
			// over the "in" values of all index fields).
			nkeys := make([][]byte, 0, len(keys)*len(fekeys))
			for _, k := range keys {
				for _, fk := range fekeys {
					nk := append(append([]byte{}, k...), fk...)
					nkeys = append(nkeys, nk)
				}
			}
			keys = nkeys
		}
		p := &plan[T]{
			idx:     idx,
			unique:  true,
			keys:    keys,
			filters: dropFilters(q.xfilters, skipFilters),
			orders:  q.xorders,
		}
		return p, nil
	}

	// Try all other indices. We treat them all as non-unique indices now.
	// We want to use the one with as many "equal" prefix fields as
	// possible. Then we hope to use a scan on the remaining, either
	// because of a filterCompare, or for an ordering. If there is a limit,
	// orderings are preferred over compares.
	equals := map[string]*filter[T]{}
	for i := range q.xfilters {
		ff := &q.xfilters[i]
		switch f := (*ff).(type) {
		case filterEqual[T]:
			equals[f.field.Name] = ff
		}
	}

	// We are going to generate new plans, and keep the new one if it is better than what we have.
	var p *plan[T]
	var nequals int
	var nrange int
	var ordered bool

	// evaluatePKOrIndex evaluates one candidate (nil idx means the primary
	// key) and replaces p if the candidate scores better on
	// (equal-prefix count, range count, ordering).
	evaluatePKOrIndex := func(idx *index) error {
		var isPK bool
		var packKeys func([]reflect.Value) ([]byte, error)
		if idx == nil {
			// Make pretend index.
			isPK = true
			idx = &index{
				Fields: []field{q.st.Current.Fields[0]},
			}
			packKeys = func(l []reflect.Value) ([]byte, error) {
				return packPK(l[0])
			}
		} else {
			packKeys = func(l []reflect.Value) ([]byte, error) {
				key, _, err := packIndexKeys(l, nil)
				return key, err
			}
		}

		// Count how many leading index fields have an equality filter.
		var neq = 0
		// log.Printf("idx %v", idx)
		var skipFilters []*filter[T]
		for _, f := range idx.Fields {
			if ff, ok := equals[f.Name]; ok {
				skipFilters = append(skipFilters, ff)
				neq++
			} else {
				break
			}
		}

		// See if the next field can be used for compare.
		var gx, lx *filterCompare[T]
		var nrng int
		var order *order
		orders := q.xorders
		if neq < len(idx.Fields) {
			nf := idx.Fields[neq]
			for i := range q.xfilters {
				ff := &q.xfilters[i]
				switch f := (*ff).(type) {
				case filterCompare[T]:
					if f.field.Name != nf.Name {
						continue
					}
					switch f.op {
					case opGreater, opGreaterEqual:
						if gx == nil {
							gx = &f
							skipFilters = append(skipFilters, ff)
							nrng++
						}
					case opLess, opLessEqual:
						if lx == nil {
							lx = &f
							skipFilters = append(skipFilters, ff)
							nrng++
						}
					}
				}
			}
			// See if it can be used for ordering.
			// todo optimization: we could use multiple orders
			if len(orders) > 0 && orders[0].field.Name == nf.Name {
				order = &orders[0]
				orders = orders[1:]
			}
		}

		// See if this is better than what we had.
		if !(neq > nequals || (neq == nequals && (nrng > nrange || order != nil && !ordered && (q.xlimit > 0 || nrng == nrange)))) {
			// log.Printf("plan not better, neq %d, nrng %d, limit %d, order %v ordered %v", neq, nrng, q.limit, order, ordered)
			return nil
		}
		nequals = neq
		nrange = nrng
		ordered = order != nil

		// Calculate the prefix key from the equality filters.
		var kvalues []reflect.Value
		for i := 0; i < neq; i++ {
			f := idx.Fields[i]
			kvalues = append(kvalues, (*equals[f.Name]).(filterEqual[T]).rvalue)
		}
		var key []byte
		var err error
		if neq > 0 {
			key, err = packKeys(kvalues)
			if err != nil {
				return err
			}
		}
		start := key
		stop := key
		if gx != nil {
			k, err := packKeys([]reflect.Value{gx.value})
			if err != nil {
				return err
			}
			start = append(append([]byte{}, start...), k...)
		}
		if lx != nil {
			k, err := packKeys([]reflect.Value{lx.value})
			if err != nil {
				return err
			}
			stop = append(append([]byte{}, stop...), k...)
		}
		startInclusive := gx == nil || gx.op != opGreater
		stopInclusive := lx == nil || lx.op != opLess
		// A descending order swaps the scan direction and its boundaries.
		if order != nil && !order.asc {
			start, stop = stop, start
			startInclusive, stopInclusive = stopInclusive, startInclusive
		}
		if isPK {
			idx = nil // Clear our fake index for PK.
		}
		p = &plan[T]{
			idx:            idx,
			desc:           order != nil && !order.asc,
			start:          start,
			stop:           stop,
			startInclusive: startInclusive,
			stopInclusive:  stopInclusive,
			filters:        dropFilters(q.xfilters, skipFilters),
			orders:         orders,
		}
		return nil
	}
	if err := evaluatePKOrIndex(nil); err != nil {
		q.error(err)
		return nil, q.err
	}
	for _, idx := range q.st.Current.Indices {
		if err := evaluatePKOrIndex(idx); err != nil {
			q.error(err)
			return nil, q.err
		}
	}
	if p != nil {
		return p, nil
	}

	// We'll just do a scan over all data.
	p = &plan[T]{
		filters: q.xfilters,
		orders:  q.xorders,
	}
	return p, nil
}
// dropFilters returns filters with the entries pointed to by skip removed.
// Pointers in skip must point at elements of filters.
func dropFilters[T any](filters []T, skip []*T) []T {
	kept := make([]T, 0, len(filters)-len(skip))
	for i := range filters {
		skipped := false
		for _, s := range skip {
			if s == &filters[i] {
				skipped = true
				break
			}
		}
		if !skipped {
			kept = append(kept, filters[i])
		}
	}
	return kept
}

1130
vendor/github.com/mjl-/bstore/query.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1215
vendor/github.com/mjl-/bstore/register.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

105
vendor/github.com/mjl-/bstore/stats.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package bstore
// StatsKV represent operations on the underlying BoltDB key/value store.
// All fields are plain counters, incremented as operations happen.
type StatsKV struct {
	Get    uint
	Put    uint // For Stats.Bucket, this counts calls of CreateBucket.
	Delete uint
	Cursor uint // Any cursor operation: Seek/First/Last/Next/Prev.
}
// Stats tracks DB/Tx/Query statistics, mostly counters.
// The Last* fields are snapshots of the most recent query, not counters.
type Stats struct {
	// Number of read-only or writable transactions. Set for DB only.
	Reads  uint
	Writes uint

	Bucket  StatsKV // Use of buckets.
	Records StatsKV // Use of records bucket for types.
	Index   StatsKV // Use of index buckets for types.

	// Operations that modify the database. Each record is counted, e.g.
	// for a query that updates/deletes multiple records.
	Get    uint
	Insert uint
	Update uint
	Delete uint

	Queries       uint // Total queries executed.
	PlanTableScan uint // Full table scans.
	PlanPK        uint // Primary key get.
	PlanUnique    uint // Full key Unique index get.
	PlanPKScan    uint // Scan over primary keys.
	PlanIndexScan uint // Scan over index.
	Sort          uint // In-memory collect and sort.

	LastType    string // Last type queried.
	LastIndex   string // Last index for LastType used for a query, or empty.
	LastOrdered bool   // Whether last scan (PK or index) use was ordered, e.g. for sorting or because of a comparison filter.
	LastAsc     bool   // If ordered, whether last index scan was ascending.
}
// add accumulates the counters from n into skv.
func (skv *StatsKV) add(n StatsKV) {
	skv.Get += n.Get
	skv.Put += n.Put
	skv.Delete += n.Delete
	skv.Cursor += n.Cursor
}
// sub subtracts the counters in n from skv.
func (skv *StatsKV) sub(n StatsKV) {
	skv.Get -= n.Get
	skv.Put -= n.Put
	skv.Delete -= n.Delete
	skv.Cursor -= n.Cursor
}
// add accumulates the counters from n into st. The Last* fields are
// overwritten with n's values, not accumulated.
func (st *Stats) add(n Stats) {
	st.Reads += n.Reads
	st.Writes += n.Writes

	st.Bucket.add(n.Bucket)
	st.Records.add(n.Records)
	st.Index.add(n.Index)

	st.Get += n.Get
	st.Insert += n.Insert
	st.Update += n.Update
	st.Delete += n.Delete

	st.Queries += n.Queries
	st.PlanTableScan += n.PlanTableScan
	st.PlanPK += n.PlanPK
	st.PlanUnique += n.PlanUnique
	st.PlanPKScan += n.PlanPKScan
	st.PlanIndexScan += n.PlanIndexScan
	st.Sort += n.Sort

	st.LastType = n.LastType
	st.LastIndex = n.LastIndex
	st.LastOrdered = n.LastOrdered
	st.LastAsc = n.LastAsc
}
// Sub returns st with the counters from o subtracted.
// The Last* fields are kept from st (the more recent snapshot) and are not
// affected by the subtraction.
func (st Stats) Sub(o Stats) Stats {
	st.Reads -= o.Reads
	st.Writes -= o.Writes

	st.Bucket.sub(o.Bucket)
	st.Records.sub(o.Records)
	st.Index.sub(o.Index)

	st.Get -= o.Get
	st.Insert -= o.Insert
	st.Update -= o.Update
	st.Delete -= o.Delete

	st.Queries -= o.Queries
	st.PlanTableScan -= o.PlanTableScan
	st.PlanPK -= o.PlanPK
	st.PlanUnique -= o.PlanUnique
	st.PlanPKScan -= o.PlanPKScan
	st.PlanIndexScan -= o.PlanIndexScan
	st.Sort -= o.Sort

	return st
}

566
vendor/github.com/mjl-/bstore/store.go generated vendored Normal file
View File

@ -0,0 +1,566 @@
package bstore
import (
"encoding"
"errors"
"fmt"
"io"
"io/fs"
"os"
"reflect"
"sync"
"time"
bolt "go.etcd.io/bbolt"
)
// Errors returned by bstore. Exported sentinels can be tested with errors.Is
// (ErrAbsent can also be compared directly, see its comment).
var (
	ErrAbsent       = errors.New("absent") // If a function can return an ErrAbsent, it can be compared directly, without errors.Is.
	ErrZero         = errors.New("must be nonzero")
	ErrUnique       = errors.New("not unique")
	ErrReference    = errors.New("referential inconsistency")
	ErrMultiple     = errors.New("multiple results")
	ErrSeq          = errors.New("highest autoincrement sequence value reached")
	ErrType         = errors.New("unknown/bad type")
	ErrIncompatible = errors.New("incompatible types")
	ErrFinished     = errors.New("query finished")
	ErrStore        = errors.New("internal/storage error") // E.g. when buckets disappear, possibly by external users of the underlying BoltDB database.
	ErrParam        = errors.New("bad parameters")

	// Unexported sentinels, used internally.
	errTxClosed    = errors.New("transaction is closed")
	errNestedIndex = errors.New("struct tags index/unique only allowed at top-level structs")
)
// sanityChecks enables additional consistency checking. Only enabled during tests.
var sanityChecks bool
// DB is a database storing Go struct values in an underlying bolt database.
// DB is safe for concurrent use, unlike a Tx or a Query.
type DB struct {
	bdb *bolt.DB

	// Read transactions take an rlock on types. Register can make changes and
	// needs a wlock.
	typesMutex sync.RWMutex
	types      map[reflect.Type]storeType
	typeNames  map[string]storeType // Go type name to store type, for checking duplicates.

	// Guards stats; updated when transactions finish (see addStats).
	statsMutex sync.Mutex
	stats      Stats
}
// Tx is a transaction on DB.
//
// A Tx is not safe for concurrent use.
type Tx struct {
	db          *DB // If nil, this transaction is closed.
	btx         *bolt.Tx
	bucketCache map[bucketKey]*bolt.Bucket // Lazily filled cache of bolt buckets, see Tx.bucket.

	// Per-transaction statistics, added to db.stats when the transaction ends.
	stats Stats
}
// bucketKey represents a subbucket for a type; used as the key of
// Tx.bucketCache.
type bucketKey struct {
	typeName string
	sub      string // Empty for top-level type bucket, otherwise "records", "types" or starting with "index.".
}
// index describes one (possibly unique) index on a type.
type index struct {
	Unique bool
	Name   string  // Normally named after the field. But user can specify alternative name with "index" or "unique" struct tag with parameter.
	Fields []field // Fields making up the index key, in order.

	tv *typeVersion // Type version this index belongs to.
}
// storeType is the registered representation of one stored Go type,
// including all schema versions seen on disk.
type storeType struct {
	Name    string       // Name of type as stored in database. Different from the current Go type name if the uses the "typename" struct tag.
	Type    reflect.Type // Type we parse into for new values.
	Current *typeVersion

	// Earlier schema versions. Older type versions can still be stored. We
	// prepare them for parsing into the reflect.Type. Some stored fields in
	// old versions may be ignored: when a later schema has removed the field,
	// that old stored field is considered deleted and will be ignored when
	// parsing.
	Versions map[uint32]*typeVersion
}
// typeVersion is one schema version of a stored type.
// note: when changing, possibly update func equal as well.
type typeVersion struct {
	Version       uint32              // First uvarint of a stored record references this version.
	OndiskVersion uint32              // Version of on-disk format. Currently always 1.
	Noauto        bool                // If true, the primary key is an int but opted out of autoincrement.
	Fields        []field             // Fields that we store. Embed/anonymous fields are kept separately in embedFields, and are not stored.
	Indices       map[string]*index   // By name of index.
	ReferencedBy  map[string]struct{} // Type names that reference this type. We require they are registered at the same time to maintain referential integrity.

	name         string
	referencedBy []*index            // Indexes (from other types) that reference this type.
	references   map[string]struct{} // Keys are the type names referenced. This is a summary for the references from Fields.
	embedFields  []embed             // Embed/anonymous fields, their values are stored through Fields, we keep them for setting values.
	fillPercent  float64             // For "records" bucket. Set to 1 for append-only/mostly use as set with HintAppend, 0.5 otherwise.
}
// field describes a single stored (non-embed) struct field.
// note: when changing, possibly update func equal as well.
// embed/anonymous fields are represented as type embed. The fields inside the embed type are of this type field.
type field struct {
	Name       string
	Type       fieldType
	Nonzero    bool
	References []string // Referenced fields. Only for the top-level struct fields, not for nested structs.
	Default    string   // As specified in struct tag. Processed version is defaultValue.

	// If not the zero reflect.Value, set this value instead of a zero value on insert.
	// This is always a non-pointer value. Only set for the current typeVersion
	// linked to a Go type.
	defaultValue reflect.Value

	// Only set if this typeVersion will parse this field. We check
	// structField.Type for non-nil before parsing this field. We don't parse it
	// if this field is no longer in the type, or if it has been removed and
	// added again in later schema versions.
	structField reflect.StructField

	// Indices this field participates in, by index name.
	indices map[string]*index
}
// embed is for embed/anonymous fields. the fields inside are represented as a type field.
type embed struct {
	Name        string
	Type        fieldType
	structField reflect.StructField
}
// kind enumerates the types of values bstore can store in a field.
type kind int

// The order of these constants is part of the in-memory representation; the
// kindStrings table below must match it.
const (
	kindInvalid kind = iota
	kindBytes
	kindBool
	kindInt
	kindInt8
	kindInt16
	kindInt32
	kindInt64
	kindUint
	kindUint8
	kindUint16
	kindUint32
	kindUint64
	kindFloat32
	kindFloat64
	kindMap
	kindSlice
	kindString
	kindTime
	kindBinaryMarshal
	kindStruct
)
// kindStrings are indexed by kind; the order must match the kind constants.
var kindStrings = []string{
	"(invalid)",
	"bytes",
	"bool",
	"int",
	"int8",
	"int16",
	"int32",
	"int64",
	"uint",
	"uint8",
	"uint16",
	"uint32",
	"uint64",
	"float32",
	"float64",
	"map",
	"slice",
	"string",
	"time",
	"binarymarshal",
	"struct",
}
// String returns the human-readable name of the kind, from kindStrings.
func (k kind) String() string {
	return kindStrings[k]
}
// fieldType describes the type of a stored field, including nested types for
// maps, slices and structs.
type fieldType struct {
	Ptr  bool // If type is a pointer.
	Kind kind // Type with possible Ptr deferenced.

	Fields           []field    // For kindStruct.
	MapKey, MapValue *fieldType // For kindMap.
	List             *fieldType // For kindSlice.
}
// String returns a readable name for the field type, suffixed with "ptr"
// when the Go field is a pointer.
func (ft fieldType) String() string {
	if ft.Ptr {
		return ft.Kind.String() + "ptr"
	}
	return ft.Kind.String()
}
// Options configure how a database should be opened or initialized.
type Options struct {
	Timeout   time.Duration // Abort if opening DB takes longer than Timeout.
	Perm      fs.FileMode   // Permissions for new file if created. If zero, 0600 is used.
	MustExist bool          // Before opening, check that file exists. If not, io/fs.ErrNotExist is returned.
}
// Open opens a bstore database and registers types by calling Register.
//
// If the file does not exist, a new database file is created, unless opts has
// MustExist set. Files are created with permission 0600, or with Perm from
// Options if nonzero.
//
// Only one DB instance can be open for a file at a time. Use opts.Timeout to
// specify a timeout during open to prevent indefinite blocking.
func Open(path string, opts *Options, typeValues ...any) (*DB, error) {
	var bopts *bolt.Options
	mode := fs.FileMode(0600)
	if opts != nil {
		if opts.Timeout > 0 {
			bopts = &bolt.Options{Timeout: opts.Timeout}
		}
		if opts.Perm != 0 {
			mode = opts.Perm
		}
		if opts.MustExist {
			if _, err := os.Stat(path); err != nil {
				return nil, err
			}
		}
	}
	bdb, err := bolt.Open(path, mode, bopts)
	if err != nil {
		return nil, err
	}

	db := &DB{
		bdb:       bdb,
		typeNames: map[string]storeType{},
		types:     map[reflect.Type]storeType{},
	}
	if err := db.Register(typeValues...); err != nil {
		bdb.Close()
		return nil, err
	}
	return db, nil
}
// Close closes the underlying bolt database.
func (db *DB) Close() error {
	return db.bdb.Close()
}
// Stats returns usage statistics for the lifetime of DB. Stats are tracked
// first in a Query or a Tx. Stats from a Query are propagated to its Tx when
// the Query finishes. Stats from a Tx are propagated to its DB when the
// transaction ends.
// The returned Stats is a copy; the mutex only guards the read.
func (db *DB) Stats() Stats {
	db.statsMutex.Lock()
	defer db.statsMutex.Unlock()
	return db.stats
}
// Stats returns usage statistics for this transaction.
// When a transaction is rolled back or committed, its statistics are copied
// into its DB.
// The returned Stats is a copy; the Tx keeps accumulating.
func (tx *Tx) Stats() Stats {
	return tx.stats
}
// WriteTo writes the entire database to w, not including changes made during this transaction.
// It delegates to the underlying bolt transaction's WriteTo.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	return tx.btx.WriteTo(w)
}
// return a bucket through cache.
// Buckets are looked up in two steps: the top-level bucket named after the
// type, then the subbucket named bk.sub inside it; both lookups are cached in
// tx.bucketCache for the lifetime of the transaction.
func (tx *Tx) bucket(bk bucketKey) (*bolt.Bucket, error) {
	if tx.bucketCache == nil {
		tx.bucketCache = map[bucketKey]*bolt.Bucket{}
	}
	b := tx.bucketCache[bk]
	if b != nil {
		return b, nil
	}
	top := tx.bucketCache[bucketKey{bk.typeName, ""}]
	if top == nil {
		tx.stats.Bucket.Get++
		top = tx.btx.Bucket([]byte(bk.typeName))
		if top == nil {
			return nil, fmt.Errorf("%w: missing bucket for type %q", ErrStore, bk.typeName)
		}
		tx.bucketCache[bucketKey{bk.typeName, ""}] = top
	}
	if bk.sub == "" {
		return top, nil
	}
	tx.stats.Bucket.Get++
	b = top.Bucket([]byte(bk.sub))
	if b == nil {
		return nil, fmt.Errorf("%w: missing bucket %q for type %q", ErrStore, bk.sub, bk.typeName)
	}
	tx.bucketCache[bk] = b
	return b, nil
}
// typeBucket returns the top-level bucket for the named type.
func (tx *Tx) typeBucket(typeName string) (*bolt.Bucket, error) {
	return tx.bucket(bucketKey{typeName, ""})
}
// recordsBucket returns the "records" subbucket for the named type,
// applying fillPercent (see typeVersion.fillPercent) on each call.
func (tx *Tx) recordsBucket(typeName string, fillPercent float64) (*bolt.Bucket, error) {
	b, err := tx.bucket(bucketKey{typeName, "records"})
	if err != nil {
		return nil, err
	}
	b.FillPercent = fillPercent
	return b, nil
}
// indexBucket returns the subbucket "index.<name>" holding the keys for idx.
func (tx *Tx) indexBucket(idx *index) (*bolt.Bucket, error) {
	return tx.bucket(bucketKey{idx.tv.name, "index." + idx.Name})
}
// Drop removes a type and its data from the database.
// If the type is currently registered, it is unregistered and no longer available.
// If a type is still referenced by another type, eg through a "ref" struct tag,
// ErrReference is returned.
// If the type does not exist, ErrAbsent is returned.
//
// NOTE(review): db.Write only takes typesMutex.RLock, while the code below
// mutates db.typeNames/db.types and the referencedBy slices — this looks like
// it needs the write lock to be safe against concurrent readers; confirm.
func (db *DB) Drop(name string) error {
	return db.Write(func(tx *Tx) error {
		tx.stats.Bucket.Get++
		if tx.btx.Bucket([]byte(name)) == nil {
			return ErrAbsent
		}

		if st, ok := db.typeNames[name]; ok && len(st.Current.referencedBy) > 0 {
			return fmt.Errorf("%w: type is still referenced", ErrReference)
		} else if ok {
			// Remove this type's back-references from each type it referenced.
			for ref := range st.Current.references {
				var n []*index
				for _, idx := range db.typeNames[ref].Current.referencedBy {
					if idx.tv != st.Current {
						n = append(n, idx)
					}
				}
				db.typeNames[ref].Current.referencedBy = n
			}
			delete(db.typeNames, name)
			delete(db.types, st.Type)
		}

		tx.stats.Bucket.Delete++
		return tx.btx.DeleteBucket([]byte(name))
	})
}
// Delete calls Delete on a new writable Tx.
// All values are deleted in a single transaction, committed only on success.
func (db *DB) Delete(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Delete(values...)
	})
}
// Get calls Get on a new read-only Tx.
// All values are fetched in a single read transaction.
func (db *DB) Get(values ...any) error {
	return db.Read(func(tx *Tx) error {
		return tx.Get(values...)
	})
}
// Insert calls Insert on a new writable Tx.
// All values are inserted in a single transaction, committed only on success.
func (db *DB) Insert(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Insert(values...)
	})
}
// Update calls Update on a new writable Tx.
// All values are updated in a single transaction, committed only on success.
func (db *DB) Update(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Update(values...)
	})
}
// typeKinds maps reflect kinds that translate one-to-one to a bstore kind.
// []byte, time.Time, encoding.BinaryMarshaler and structs are special-cased
// in typeKind.
var typeKinds = map[reflect.Kind]kind{
	reflect.Bool:    kindBool,
	reflect.Int:     kindInt,
	reflect.Int8:    kindInt8,
	reflect.Int16:   kindInt16,
	reflect.Int32:   kindInt32,
	reflect.Int64:   kindInt64,
	reflect.Uint:    kindUint,
	reflect.Uint8:   kindUint8,
	reflect.Uint16:  kindUint16,
	reflect.Uint32:  kindUint32,
	reflect.Uint64:  kindUint64,
	reflect.Float32: kindFloat32,
	reflect.Float64: kindFloat64,
	reflect.Map:     kindMap,
	reflect.Slice:   kindSlice,
	reflect.String:  kindString,
}
// typeKind maps a Go type to the bstore kind used to store it, returning an
// ErrType-wrapped error for unsupported types.
func typeKind(t reflect.Type) (kind, error) {
	// []byte is stored as raw bytes, not as a slice of uint8.
	if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
		return kindBytes, nil
	}

	if k, ok := typeKinds[t.Kind()]; ok {
		return k, nil
	}

	if t == reflect.TypeOf(zerotime) {
		return kindTime, nil
	}

	if reflect.PointerTo(t).AssignableTo(reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()) {
		return kindBinaryMarshal, nil
	}

	if t.Kind() == reflect.Struct {
		return kindStruct, nil
	}
	return kind(0), fmt.Errorf("%w: unsupported type %v", ErrType, t)
}
// typeName returns the name under which type t is stored: the "typename"
// parameter from the primary key field's bstore struct tag if present,
// otherwise the Go type name.
func typeName(t reflect.Type) (string, error) {
	tags, err := newStoreTags(t.Field(0).Tag.Get("bstore"), true)
	if err != nil {
		return "", err
	}
	if name, err := tags.Get("typename"); err != nil {
		return "", err
	} else if name != "" {
		return name, nil
	}
	return t.Name(), nil
}
// Get value for a key. For insert a next sequence may be generated for the
// primary key.
// Returns the packed key, the reflect value of the PK field, and whether a
// new sequence value was assigned.
func (tv typeVersion) keyValue(tx *Tx, rv reflect.Value, insert bool, rb *bolt.Bucket) ([]byte, reflect.Value, bool, error) {
	f := tv.Fields[0]
	krv := rv.FieldByIndex(f.structField.Index)
	var seq bool
	if krv.IsZero() {
		// Zero PK: only valid on insert with autoincrement, where we assign
		// the bucket's next sequence number.
		if !insert {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key can not be zero value", ErrParam)
		}
		if tv.Noauto {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key cannot be zero value without autoincrement", ErrParam)
		}
		id, err := rb.NextSequence()
		if err != nil {
			return nil, reflect.Value{}, seq, fmt.Errorf("next primary key: %w", err)
		}
		switch f.Type.Kind {
		case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
			if krv.OverflowInt(int64(id)) {
				return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
			}
			krv.SetInt(int64(id))
		case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
			if krv.OverflowUint(id) {
				return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
			}
			krv.SetUint(id)
		default:
			// todo: should check this during register.
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: unsupported autoincrement primary key type %v", ErrZero, f.Type.Kind)
		}
		seq = true
	} else if !tv.Noauto && insert {
		// We let user insert their own ID for our own autoincrement
		// PK. But we update the internal next sequence if the users's
		// PK is highest yet, so a future autoincrement insert will succeed.
		switch f.Type.Kind {
		case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
			v := krv.Int()
			if v > 0 && uint64(v) > rb.Sequence() {
				if err := rb.SetSequence(uint64(v)); err != nil {
					return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
				}
			}
		case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
			v := krv.Uint()
			if v > rb.Sequence() {
				if err := rb.SetSequence(v); err != nil {
					return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
				}
			}
		}
	}

	k, err := packPK(krv)
	if err != nil {
		return nil, reflect.Value{}, seq, err
	}
	if seq {
		// Sanity check: a freshly generated sequence value must not collide
		// with an existing record.
		tx.stats.Records.Get++
		if rb.Get(k) != nil {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: internal error: next sequence value is already present", ErrUnique)
		}
	}
	return k, krv, seq, err
}
// Read calls function fn with a new read-only transaction, ensuring transaction rollback.
// The types read-lock is held for the duration so Register cannot change
// registered types concurrently.
func (db *DB) Read(fn func(*Tx) error) error {
	db.typesMutex.RLock()
	defer db.typesMutex.RUnlock()
	return db.bdb.View(func(btx *bolt.Tx) error {
		tx := &Tx{db: db, btx: btx}
		tx.stats.Reads++
		defer tx.addStats()
		return fn(tx)
	})
}
// Write calls function fn with a new read-write transaction. If fn returns
// nil, the transaction is committed. Otherwise the transaction is rolled back.
// The types read-lock is held for the duration so Register cannot change
// registered types concurrently.
func (db *DB) Write(fn func(*Tx) error) error {
	db.typesMutex.RLock()
	defer db.typesMutex.RUnlock()
	return db.bdb.Update(func(btx *bolt.Tx) error {
		tx := &Tx{db: db, btx: btx}
		tx.stats.Writes++
		defer tx.addStats()
		return fn(tx)
	})
}
// storeType looks up the registered storeType for rt, returning an
// ErrType-wrapped error when rt was not registered.
func (db *DB) storeType(rt reflect.Type) (storeType, error) {
	if st, ok := db.types[rt]; ok {
		return st, nil
	}
	return storeType{}, fmt.Errorf("%w: %v", ErrType, rt)
}
// HintAppend sets a hint whether changes to the types indicated by each struct
// from values is (mostly) append-only.
//
// This currently sets the BoltDB bucket FillPercentage to 1 for efficient use
// of storage space.
func (db *DB) HintAppend(append bool, values ...any) error {
	// Note: the "append" parameter shadows the builtin of the same name here.
	db.typesMutex.Lock()
	defer db.typesMutex.Unlock()

	for _, v := range values {
		st, err := db.storeType(reflect.TypeOf(v))
		if err != nil {
			return err
		}
		fill := 0.5
		if append {
			fill = 1.0
		}
		st.Current.fillPercent = fill
	}
	return nil
}

69
vendor/github.com/mjl-/bstore/tags.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
package bstore
import (
"fmt"
"strings"
)
// storeTags is the list of comma-separated words parsed from a "bstore" struct tag.
type storeTags []string
// newStoreTags parses the comma-separated words of a "bstore" struct tag,
// checking that each word is allowed for this position: some words are only
// valid on the primary key field (isPK), some only on other fields.
func newStoreTags(tag string, isPK bool) (storeTags, error) {
	if tag == "" {
		return nil, nil
	}
	words := strings.Split(tag, ",")
	for _, w := range words {
		// Only the keyword before the first space matters for validation.
		kw, _, _ := strings.Cut(w, " ")
		switch kw {
		case "noauto", "typename":
			if !isPK {
				return nil, fmt.Errorf("%w: cannot have tag %q for non-primary key", ErrType, kw)
			}
		case "index", "unique", "default", "-":
			if isPK {
				return nil, fmt.Errorf("%w: cannot have tag %q on primary key", ErrType, kw)
			}
		case "name", "nonzero", "ref":
			// Valid on any field.
		default:
			return nil, fmt.Errorf("%w: unknown store tag %q", ErrType, kw)
		}
	}
	return storeTags(words), nil
}
// Has reports whether word occurs as a bare (argument-less) tag word.
func (t storeTags) Has(word string) bool {
	for i := range t {
		if t[i] == word {
			return true
		}
	}
	return false
}
// Get returns the argument of the first occurrence of tag word, e.g. the name
// in "name fieldname". It returns "" without error if word is absent, and an
// error if word is present without a (non-empty) argument.
func (t storeTags) Get(word string) (string, error) {
	for _, s := range t {
		kw, arg, haveArg := strings.Cut(s, " ")
		if kw != word {
			continue
		}
		if !haveArg {
			return "", fmt.Errorf("%w: bstore word %q requires argument", ErrType, word)
		}
		if arg == "" {
			return "", fmt.Errorf("%w: bstore word %q requires non-empty parameter", ErrType, word)
		}
		return arg, nil
	}
	return "", nil
}
// List returns the arguments of all occurrences of tag word, e.g. all index
// definitions for "index".
func (t storeTags) List(word string) []string {
	prefix := word + " "
	var args []string
	for _, s := range t {
		if strings.HasPrefix(s, prefix) {
			args = append(args, strings.TrimPrefix(s, prefix))
		}
	}
	return args
}

438
vendor/github.com/mjl-/bstore/tx.go generated vendored Normal file
View File

@ -0,0 +1,438 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
bolt "go.etcd.io/bbolt"
)
// structptr returns the dereferenced struct value of value, which must be a
// non-nil pointer to a struct. ErrParam is returned otherwise.
func (tx *Tx) structptr(value any) (reflect.Value, error) {
	rv := reflect.ValueOf(value)
	// Short-circuit evaluation keeps the later checks safe on invalid values.
	ok := rv.IsValid() && rv.Kind() == reflect.Ptr && rv.Elem().IsValid() && rv.Type().Elem().Kind() == reflect.Struct
	if !ok {
		return reflect.Value{}, fmt.Errorf("%w: value must be non-nil pointer to a struct, is %T", ErrParam, value)
	}
	return rv.Elem(), nil
}
// structOrStructptr returns value as a struct reflect.Value, dereferencing one
// level of pointer if needed. A nil value, nil pointer or non-struct is an
// ErrParam error.
func (tx *Tx) structOrStructptr(value any) (reflect.Value, error) {
	rv := reflect.ValueOf(value)
	if rv.Kind() == reflect.Ptr {
		rv = rv.Elem()
	}
	switch {
	case !rv.IsValid():
		// Covers both a nil interface value and a nil pointer.
		return reflect.Value{}, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam)
	case rv.Kind() != reflect.Struct:
		return reflect.Value{}, fmt.Errorf("%w: value must be a struct or pointer to a struct, is %T", ErrParam, value)
	}
	return rv, nil
}
// update indices by comparing indexed fields of the ov (old) and v (new). Only if
// the fields changed will the index be updated. Either ov or v may be the
// reflect.Value zero value, indicating there is no old/new value and the index
// should be updated.
func (tx *Tx) updateIndices(tv *typeVersion, pk []byte, ov, v reflect.Value) error {
	// changed reports whether any of idx's fields differ between the old and
	// new value. Only called when both ov and v are valid.
	changed := func(idx *index) bool {
		for _, f := range idx.Fields {
			rofv := ov.FieldByIndex(f.structField.Index)
			nofv := v.FieldByIndex(f.structField.Index)
			// note: checking the interface values is enough, we only allow comparable types as index fields.
			if rofv.Interface() != nofv.Interface() {
				return true
			}
		}
		return false
	}
	for _, idx := range tv.Indices {
		// Decide per index whether to remove the old entry, add a new one, or both.
		var add, remove bool
		if !ov.IsValid() {
			// Insert: no old value, only add.
			add = true
		} else if !v.IsValid() {
			// Delete: no new value, only remove.
			remove = true
		} else if !changed(idx) {
			// Update that did not touch any field of this index.
			continue
		} else {
			add, remove = true, true
		}
		ib, err := tx.indexBucket(idx)
		if err != nil {
			return err
		}
		if remove {
			_, ik, err := idx.packKey(ov, pk)
			if err != nil {
				return err
			}
			tx.stats.Index.Delete++
			if sanityChecks {
				// Extra (expensive) check: the entry we are about to delete must exist.
				tx.stats.Index.Get++
				if ib.Get(ik) == nil {
					return fmt.Errorf("internal error: key missing from index")
				}
			}
			if err := ib.Delete(ik); err != nil {
				return fmt.Errorf("%w: removing from index: %s", ErrStore, err)
			}
		}
		if add {
			prek, ik, err := idx.packKey(v, pk)
			if err != nil {
				return err
			}
			if idx.Unique {
				// prek is the key without the trailing pk. For a unique index,
				// any existing key with that prefix is a conflict.
				tx.stats.Index.Cursor++
				if xk, _ := ib.Cursor().Seek(prek); xk != nil && bytes.HasPrefix(xk, prek) {
					return fmt.Errorf("%w: %q", ErrUnique, idx.Name)
				}
			}
			tx.stats.Index.Put++
			if err := ib.Put(ik, []byte{}); err != nil {
				return fmt.Errorf("inserting into index: %w", err)
			}
		}
	}
	return nil
}
// checkReferences verifies referential integrity for rv before it is stored:
// for each field with references, the referenced record must exist in each
// referenced type's bucket. Zero references are skipped, as are references
// that are unchanged compared to the old value ov (when ov is valid).
func (tx *Tx) checkReferences(tv *typeVersion, pk []byte, ov, rv reflect.Value) error {
	for _, f := range tv.Fields {
		if len(f.References) == 0 {
			continue
		}
		frv := rv.FieldByIndex(f.structField.Index)
		// Skip the zero value (no reference) and references equal to the old value.
		if frv.IsZero() || (ov.IsValid() && ov.FieldByIndex(f.structField.Index).Interface() == frv.Interface()) {
			continue
		}
		k, err := packPK(frv)
		if err != nil {
			return err
		}
		for _, name := range f.References {
			rb, err := tx.recordsBucket(name, tv.fillPercent)
			if err != nil {
				return err
			}
			// Count this lookup, consistent with other record Gets in this file.
			tx.stats.Records.Get++
			if rb.Get(k) == nil {
				return fmt.Errorf("%w: value %v from field %q to %q", ErrReference, frv.Interface(), f.Name, name)
			}
		}
	}
	return nil
}
// addStats merges this transaction's statistics into its DB and resets the
// per-transaction counters.
func (tx *Tx) addStats() {
	tx.db.statsMutex.Lock()
	defer tx.db.statsMutex.Unlock()
	tx.db.stats.add(tx.stats)
	tx.stats = Stats{}
}
// Get fetches records by their primary key from the database. Each value must
// be a pointer to a struct.
//
// ErrAbsent is returned if the record does not exist.
func (tx *Tx) Get(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}
	for _, v := range values {
		tx.stats.Get++
		rv, err := tx.structptr(v)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		bucket, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
		if err != nil {
			return err
		}
		// Pack the primary key from the struct's PK field.
		key, _, _, err := st.Current.keyValue(tx, rv, false, bucket)
		if err != nil {
			return err
		}
		tx.stats.Records.Get++
		buf := bucket.Get(key)
		if buf == nil {
			return ErrAbsent
		}
		// Unpack the stored record into the caller's struct.
		if err := st.parse(rv, buf); err != nil {
			return err
		}
	}
	return nil
}
// Delete removes values by their primary key from the database. Each value
// must be a struct or pointer to a struct. Indices are automatically updated
// and referential integrity is maintained.
//
// ErrAbsent is returned if the record does not exist.
// ErrReference is returned if another record still references this record.
func (tx *Tx) Delete(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}
	for _, v := range values {
		tx.stats.Delete++
		rv, err := tx.structOrStructptr(v)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		bucket, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
		if err != nil {
			return err
		}
		key, _, _, err := st.Current.keyValue(tx, rv, false, bucket)
		if err != nil {
			return err
		}
		tx.stats.Records.Get++
		cur := bucket.Get(key)
		if cur == nil {
			return ErrAbsent
		}
		// Parse the stored value so indices and references can be cleaned up.
		old, err := st.parseNew(key, cur)
		if err != nil {
			return fmt.Errorf("parsing current value: %w", err)
		}
		if err := tx.delete(bucket, st, key, old); err != nil {
			return err
		}
	}
	return nil
}
// delete removes the record with primary key k and parsed stored value rov
// from records bucket rb, after verifying no other record still references it,
// and removes its index entries.
func (tx *Tx) delete(rb *bolt.Bucket, st storeType, k []byte, rov reflect.Value) error {
	// Check that anyone referencing this type does not reference this record.
	for _, refBy := range st.Current.referencedBy {
		if ib, err := tx.indexBucket(refBy); err != nil {
			return err
		} else {
			// Keys in the referencing index start with the packed referenced
			// primary key, so any key with prefix k indicates a referencing
			// record still exists.
			tx.stats.Index.Cursor++
			if xk, _ := ib.Cursor().Seek(k); xk != nil && bytes.HasPrefix(xk, k) {
				return fmt.Errorf("%w: index %q", ErrReference, refBy.Name)
			}
		}
	}
	// Delete value from indices.
	if err := tx.updateIndices(st.Current, k, rov, reflect.Value{}); err != nil {
		return fmt.Errorf("removing from indices: %w", err)
	}
	tx.stats.Records.Delete++
	return rb.Delete(k)
}
// Update updates records represented by values by their primary keys into the
// database. Each value must be a pointer to a struct. Indices are
// automatically updated.
//
// ErrAbsent is returned if the record does not exist.
func (tx *Tx) Update(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}
	for _, v := range values {
		tx.stats.Update++
		rv, err := tx.structptr(v)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		// put with insert=false: the record must already exist.
		if err := tx.put(st, rv, false); err != nil {
			return err
		}
	}
	return nil
}
// Insert inserts values as new records into the database. Each value must be a
// pointer to a struct. If the primary key field is zero and autoincrement is not
// disabled, the next sequence is assigned. Indices are automatically updated.
//
// ErrUnique is returned if the record already exists.
// ErrSeq is returned if no next autoincrement integer is available.
// ErrZero is returned if a nonzero constraint would be violated.
// ErrReference is returned if another record is referenced that does not exist.
func (tx *Tx) Insert(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}
	for _, v := range values {
		tx.stats.Insert++
		rv, err := tx.structptr(v)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		// Apply configured default values before storing.
		if err := st.Current.applyDefault(rv); err != nil {
			return err
		}
		// put with insert=true: the record must not yet exist.
		if err := tx.put(st, rv, true); err != nil {
			return err
		}
	}
	return nil
}
// put is the shared implementation of Insert (insert true) and Update (insert
// false): it resolves the primary key and stores the packed record.
func (tx *Tx) put(st storeType, rv reflect.Value, insert bool) error {
	rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
	if err != nil {
		return err
	}
	// keyValue returns the packed primary key, the PK field value, and whether
	// a new sequence number was assigned for it (only when inserting).
	k, krv, seq, err := st.Current.keyValue(tx, rv, insert, rb)
	if err != nil {
		return err
	}
	if insert {
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv != nil {
			return fmt.Errorf("%w: record already exists", ErrUnique)
		}
		err := tx.insert(rb, st, rv, krv, k)
		if err != nil && seq {
			// Zero out the generated sequence.
			krv.Set(reflect.Zero(krv.Type()))
		}
		return err
	} else {
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		// Parse the currently stored value so indices and references can be
		// updated based on what actually changed.
		ov, err := st.parseNew(k, bv)
		if err != nil {
			return fmt.Errorf("parsing current value: %w", err)
		}
		return tx.update(rb, st, rv, ov, k)
	}
}
// insert packs rv and stores it under primary key k, after checking references
// and adding index entries. On success, the (possibly newly assigned) primary
// key value krv is written back into the caller's struct.
func (tx *Tx) insert(rb *bolt.Bucket, st storeType, rv, krv reflect.Value, k []byte) error {
	v, err := st.pack(rv)
	if err != nil {
		return err
	}
	// No old value on insert: pass the zero reflect.Value as the old value.
	if err := tx.checkReferences(st.Current, k, reflect.Value{}, rv); err != nil {
		return err
	}
	if err := tx.updateIndices(st.Current, k, reflect.Value{}, rv); err != nil {
		return fmt.Errorf("updating indices for inserted value: %w", err)
	}
	tx.stats.Records.Put++
	if err := rb.Put(k, v); err != nil {
		return err
	}
	// Field 0 holds the primary key; write back e.g. an assigned sequence.
	rv.Field(0).Set(krv)
	return nil
}
// update replaces the record stored as rov with rv under primary key k,
// updating references and indices for the fields that changed. It is a no-op
// when the new value equals the stored one.
func (tx *Tx) update(rb *bolt.Bucket, st storeType, rv, rov reflect.Value, k []byte) error {
	if st.Current.equal(rov, rv) {
		return nil
	}
	v, err := st.pack(rv)
	if err != nil {
		return err
	}
	if err := tx.checkReferences(st.Current, k, rov, rv); err != nil {
		return err
	}
	if err := tx.updateIndices(st.Current, k, rov, rv); err != nil {
		return fmt.Errorf("updating indices for updated record: %w", err)
	}
	tx.stats.Records.Put++
	return rb.Put(k, v)
}
// Begin starts a transaction.
//
// If writable is true, the transaction allows modifications. Only one writable
// transaction can be active at a time on a DB. No read-only transactions can be
// active at the same time. Attempting to begin a read-only transaction from a
// writable transaction leads to deadlock.
//
// A writable Tx can be committed or rolled back. A read-only transaction must
// always be rolled back.
func (db *DB) Begin(writable bool) (*Tx, error) {
	btx, err := db.bdb.Begin(writable)
	if err != nil {
		return nil, err
	}
	// Hold the types read-lock for the lifetime of the transaction; it is
	// released again in Rollback or Commit.
	db.typesMutex.RLock()
	tx := &Tx{db: db, btx: btx}
	if writable {
		tx.stats.Writes++
	} else {
		tx.stats.Reads++
	}
	return tx, nil
}
// Rollback aborts and cancels any changes made in this transaction.
// Statistics are added to its DB.
func (tx *Tx) Rollback() error {
	if tx.db == nil {
		return errTxClosed
	}
	// Flush statistics and release the types read-lock taken in Begin.
	tx.addStats()
	tx.db.typesMutex.RUnlock()
	err := tx.btx.Rollback()
	// A nil db marks this Tx as closed; further calls return errTxClosed.
	tx.db = nil
	return err
}
// Commit commits changes made in the transaction to the database.
// Statistics are added to its DB.
func (tx *Tx) Commit() error {
	if tx.db == nil {
		return errTxClosed
	}
	// Flush statistics and release the types read-lock taken in Begin.
	tx.addStats()
	tx.db.typesMutex.RUnlock()
	err := tx.btx.Commit()
	if err != nil {
		tx.btx.Rollback() // Nothing to do for error.
	}
	// A nil db marks this Tx as closed; further calls return errTxClosed.
	tx.db = nil
	return err
}