mirror of
https://github.com/mjl-/mox.git
synced 2025-07-12 16:24:37 +03:00
mox!
This commit is contained in:
3
vendor/github.com/mjl-/bstore/.gitignore
generated
vendored
Normal file
3
vendor/github.com/mjl-/bstore/.gitignore
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
/cover.out
|
||||
/cover.html
|
||||
/testdata/*.db
|
7
vendor/github.com/mjl-/bstore/LICENSE
generated
vendored
Normal file
7
vendor/github.com/mjl-/bstore/LICENSE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2022 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
20
vendor/github.com/mjl-/bstore/Makefile
generated
vendored
Normal file
20
vendor/github.com/mjl-/bstore/Makefile
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
build:
|
||||
go build ./...
|
||||
go vet ./...
|
||||
GOARCH=386 go vet ./...
|
||||
staticcheck ./...
|
||||
./gendoc.sh
|
||||
|
||||
fmt:
|
||||
go fmt ./...
|
||||
gofmt -w -s *.go cmd/bstore/*.go
|
||||
|
||||
test:
|
||||
go test -race -shuffle=on -coverprofile cover.out
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
|
||||
benchmark:
|
||||
go test -bench .
|
||||
|
||||
fuzz:
|
||||
go test -fuzz .
|
51
vendor/github.com/mjl-/bstore/README.md
generated
vendored
Normal file
51
vendor/github.com/mjl-/bstore/README.md
generated
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
bstore is a database library for storing and quering Go struct data.
|
||||
|
||||
See https://pkg.go.dev/github.com/mjl-/bstore
|
||||
|
||||
MIT-licensed
|
||||
|
||||
# Comparison
|
||||
|
||||
Bstore is designed as a small, pure Go library that still provides most of the
|
||||
common data consistency requirements for modest database use cases. Bstore aims
|
||||
to make basic use of cgo-based libraries, such as sqlite, unnecessary. Sqlite
|
||||
is a great library, but Go applications that require cgo are hard to
|
||||
cross-compile. With bstore, cross-compiling to most Go-supported platforms
|
||||
stays trivial. Although bstore is much more limited in so many aspects than
|
||||
sqlite, bstore also offers some advantages as well.
|
||||
|
||||
- Cross-compilation and reproducibility: Trivial with bstore due to pure Go,
|
||||
much harder with sqlite because of cgo.
|
||||
- Code complexity: low with bstore (6k lines including comments/docs), high
|
||||
with sqlite.
|
||||
- Query language: mostly-type-checked function calls in bstore, free-form query
|
||||
strings only checked at runtime with sqlite.
|
||||
- Functionality: very limited with bstore, much more full-featured with sqlite.
|
||||
- Schema management: mostly automatic based on Go type definitions in bstore,
|
||||
manual with ALTER statements in sqlite.
|
||||
- Types and packing/parsing: automatic/transparent in bstore based on Go types
|
||||
(including maps, slices, structs and custom MarshalBinary encoding), versus
|
||||
manual scanning and parameter passing with sqlite with limited set of SQL
|
||||
types.
|
||||
- Performance: low to good performance with bstore, high performance with
|
||||
sqlite.
|
||||
- Database files: single file with bstore, several files with sqlite (due to
|
||||
WAL or journal files).
|
||||
- Test coverage: decent coverage but limited real-world for bstore, versus
|
||||
extremely thoroughly tested and with enormous real-world use.
|
||||
|
||||
# FAQ
|
||||
|
||||
Q: Is bstore an ORM?
|
||||
|
||||
A: No. The API for bstore may look like an ORM. But instead of mapping bstore
|
||||
"queries" (function calls) to an SQL query string, bstore executes them
|
||||
directly without converting to a query language.
|
||||
|
||||
Q: How does bstore store its data?
|
||||
|
||||
A bstore database is a single-file BoltDB database. BoltDB provides ACID
|
||||
properties. Bstore uses a BoltDB "bucket" (key/value store) for each Go type
|
||||
stored, with multiple subbuckets: one for type definitions, one for the actual
|
||||
data, and one bucket per index. BoltDB stores data in a B+tree. See format.md
|
||||
for details.
|
80
vendor/github.com/mjl-/bstore/default.go
generated
vendored
Normal file
80
vendor/github.com/mjl-/bstore/default.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
var zerotime = time.Time{}
|
||||
|
||||
// applyDefault replaces zero values for fields that have a Default value configured.
|
||||
func (tv *typeVersion) applyDefault(rv reflect.Value) error {
|
||||
for _, f := range tv.Fields[1:] {
|
||||
fv := rv.FieldByIndex(f.structField.Index)
|
||||
if err := f.applyDefault(fv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f field) applyDefault(rv reflect.Value) error {
|
||||
switch f.Type.Kind {
|
||||
case kindBytes, kindBinaryMarshal, kindMap:
|
||||
return nil
|
||||
|
||||
case kindSlice, kindStruct:
|
||||
return f.Type.applyDefault(rv)
|
||||
|
||||
case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64, kindFloat32, kindFloat64, kindString, kindTime:
|
||||
if !f.defaultValue.IsValid() || !rv.IsZero() {
|
||||
return nil
|
||||
}
|
||||
fv := f.defaultValue
|
||||
// Time is special. "now" is encoded as the zero value of time.Time.
|
||||
if f.Type.Kind == kindTime && fv.Interface() == zerotime {
|
||||
now := time.Now().Round(0)
|
||||
if f.Type.Ptr {
|
||||
fv = reflect.ValueOf(&now)
|
||||
} else {
|
||||
fv = reflect.ValueOf(now)
|
||||
}
|
||||
} else if f.Type.Ptr {
|
||||
fv = reflect.New(f.structField.Type.Elem())
|
||||
fv.Elem().Set(f.defaultValue)
|
||||
}
|
||||
rv.Set(fv)
|
||||
return nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("internal error: missing case for %v", f.Type.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
// only for recursing. we do not support recursing into maps because it would
|
||||
// involve more work making values settable. and how sensible it it anyway?
|
||||
func (ft fieldType) applyDefault(rv reflect.Value) error {
|
||||
if ft.Ptr && (rv.IsZero() || rv.IsNil()) {
|
||||
return nil
|
||||
} else if ft.Ptr {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
switch ft.Kind {
|
||||
case kindSlice:
|
||||
n := rv.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
if err := ft.List.applyDefault(rv.Index(i)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case kindStruct:
|
||||
for _, nf := range ft.Fields {
|
||||
nfv := rv.FieldByIndex(nf.structField.Index)
|
||||
if err := nf.applyDefault(nfv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
142
vendor/github.com/mjl-/bstore/doc.go
generated
vendored
Normal file
142
vendor/github.com/mjl-/bstore/doc.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
/*
|
||||
Package bstore is a database library for storing and querying Go struct data.
|
||||
|
||||
Bstore is designed as a small, pure Go library that still provides most of
|
||||
the common data consistency requirements for modest database use cases. Bstore
|
||||
aims to make basic use of cgo-based libraries, such as sqlite, unnecessary.
|
||||
|
||||
Bstore implements autoincrementing primary keys, indices, default values,
|
||||
enforcement of nonzero, unique and referential integrity constraints, automatic
|
||||
schema updates and a query API for combining filters/sorting/limits. Queries
|
||||
are planned and executed using indices for fast execution where possible.
|
||||
Bstore is designed with the Go type system in mind: you typically don't have to
|
||||
write any (un)marshal code for your types.
|
||||
|
||||
# Field types
|
||||
|
||||
Struct field types currently supported for storing, including pointers to these
|
||||
types, but not pointers to pointers:
|
||||
|
||||
- int (as int32), int8, int16, int32, int64
|
||||
- uint (as uint32), uint8, uint16, uint32, uint64
|
||||
- bool, float32, float64, string, []byte
|
||||
- Maps, with keys and values of any supported type, except keys with pointer types.
|
||||
- Slices, with elements of any supported type.
|
||||
- time.Time
|
||||
- Types that implement binary.MarshalBinary and binary.UnmarshalBinary, useful
|
||||
for struct types with state in private fields. Do not change the
|
||||
(Un)marshalBinary method in an incompatible way without a data migration.
|
||||
- Structs, with fields of any supported type.
|
||||
|
||||
Note: int and uint are stored as int32 and uint32, for compatibility of database
|
||||
files between 32bit and 64bit systems. Where possible, use explicit (u)int32 or
|
||||
(u)int64 types.
|
||||
|
||||
Embedded structs are handled by storing the individual fields of the embedded
|
||||
struct. The named embedded type is not part of the type schema, and can
|
||||
currently only be used with UpdateField and UpdateFields, not for filtering.
|
||||
|
||||
Bstore embraces the use of Go zero values. Use zero values, possibly pointers,
|
||||
where you would use NULL values in SQL.
|
||||
|
||||
Types that have not yet been implemented: interface values, (fixed length) arrays,
|
||||
complex numbers.
|
||||
|
||||
# Struct tags
|
||||
|
||||
The typical Go struct can be stored in the database. The first field of a
|
||||
struct type is its primary key, and must always be unique. Additional behaviour
|
||||
can be configured through struct tag "bstore". The values are comma-separated.
|
||||
Typically one word, but some have multiple space-separated words:
|
||||
|
||||
- "-" ignores the field entirely.
|
||||
- "name <fieldname>", use "fieldname" instead of the Go type field name.
|
||||
- "nonzero", enforces that field values are not the zero value.
|
||||
- "noauto", only valid for integer types, and only for the primary key. By
|
||||
default, an integer-typed primary key will automatically get a next value
|
||||
assigned on insert when it is 0. With noauto inserting a 0 value results in an
|
||||
error. For primary keys of other types inserting the zero value always results
|
||||
in an error.
|
||||
- "index" or "index <field1+field2+...> [<name>]", adds an index. In the first
|
||||
form, the index is on the field on which the tag is specified, and the index
|
||||
name is the same as the field name. In the second form multiple fields can be
|
||||
specified, and an optional name. The first field must be the field on which
|
||||
the tag is specified. The field names are +-separated. The default name for
|
||||
the second form is the same +-separated string but can be set explicitly to
|
||||
the second parameter. An index can only be set for basic integer types, bools,
|
||||
time and strings. Indices are automatically (re)created when registering a
|
||||
type.
|
||||
- "unique" or "unique <field1+field2+...> [<name>]", adds an index as with
|
||||
"index" and also enforces a unique constraint. For time.Time the timezone is
|
||||
ignored for the uniqueness check.
|
||||
- "ref <type>", enforces that the value exists as primary key for "type".
|
||||
Field types must match exactly, e.g. you cannot reference an int with an int64.
|
||||
An index is automatically created and maintained for fields with a foreign key,
|
||||
for efficiently checking that removed records in the referenced type are not in
|
||||
use. If the field has the zero value, the reference is not checked. If you
|
||||
require a valid reference, add "nonzero".
|
||||
- "default <value>", replaces a zero value with the specified value on record
|
||||
insert. Special value "now" is recognized for time.Time as the current time.
|
||||
Times are parsed as time.RFC3339 otherwise. Supported types: bool
|
||||
("true"/"false"), integers, floats, strings. Value is not quoted and no escaping
|
||||
of special characters, like the comma that separates struct tag words, is
|
||||
possible. Defaults are also replaced on fields in nested structs and
|
||||
slices, but not in maps.
|
||||
- "typename <name>", override name of the type. The name of the Go type is
|
||||
used by default. Can only be present on the first field (primary key).
|
||||
Useful for doing schema updates.
|
||||
|
||||
# Schema updates
|
||||
|
||||
Before using a Go type, you must register it for use with the open database by
|
||||
passing a (zero) value of that type to the Open or Register functions. For each
|
||||
type, a type definition is stored in the database. If a type has an updated
|
||||
definition since the previous database open, a new type definition is added to
|
||||
the database automatically and any required modifications are made: Indexes
|
||||
(re)created, fields added/removed, new nonzero/unique/reference constraints
|
||||
validated.
|
||||
|
||||
If data/types cannot be updated automatically (e.g. converting an int field into
|
||||
a string field), custom data migration code is needed. You may have to keep
|
||||
track of a data/schema version.
|
||||
|
||||
As a special case, you can switch field types between pointer and non-pointer
|
||||
types. With one exception: changing from pointer to non-pointer where the type
|
||||
has a field that must be nonzero is not allowed. The on-disk encoding will not be
|
||||
changed, and nil pointers will turn into zero values, and zero values into nil
|
||||
pointers. Also see section Limitations about pointer types.
|
||||
|
||||
Because named embed structs are not part of the type definition, you can
|
||||
wrap/unwrap fields into a embed/anonymous struct field. No new type definition
|
||||
is created.
|
||||
|
||||
# BoltDB
|
||||
|
||||
BoltDB is used as underlying storage. Bolt provides ACID transactions, storing
|
||||
its data in a B+tree. Only a single write transaction can be active at a time,
|
||||
but otherwise multiple read-only transactions can be active. Do not start a
|
||||
blocking read-only transaction while holding a writable transaction or vice
|
||||
versa, this will cause deadlock.
|
||||
|
||||
Bolt uses Go types that are memory mapped to the database file. This means bolt
|
||||
database files cannot be transferred between machines with different endianness.
|
||||
Bolt uses explicit widths for its types, so files can be transferred between
|
||||
32bit and 64bit machines of same endianness.
|
||||
|
||||
# Limitations
|
||||
|
||||
Bstore does not implement the equivalent of SQL joins, aggregates, and many
|
||||
other concepts.
|
||||
|
||||
Filtering/comparing/sorting on pointer fields is not currently allowed. Pointer
|
||||
fields cannot have a (unique) index due to the current index format. Using zero
|
||||
values is recommended instead for now.
|
||||
|
||||
Integer field types can be expanded to wider types, but not to a different
|
||||
signedness or a smaller integer (fewer bits). The primary key of a type cannot
|
||||
currently be changed.
|
||||
|
||||
The first field of a struct is always the primary key. Types requires an
|
||||
explicit primary key. Autoincrement is only available for the primary key.
|
||||
*/
|
||||
package bstore
|
91
vendor/github.com/mjl-/bstore/equal.go
generated
vendored
Normal file
91
vendor/github.com/mjl-/bstore/equal.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// equal checks if ov and v are the same as far as storage is concerned. i.e.
|
||||
// this only takes stored fields into account. reflect.DeepEqual cannot be used,
|
||||
// it would take all fields into account, including unexported.
|
||||
func (tv *typeVersion) equal(ov, v reflect.Value) (r bool) {
|
||||
if !ov.IsValid() || !v.IsValid() {
|
||||
return false
|
||||
}
|
||||
for _, f := range tv.Fields {
|
||||
fov := ov.FieldByIndex(f.structField.Index)
|
||||
fv := v.FieldByIndex(f.structField.Index)
|
||||
if !f.Type.equal(fov, fv) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (ft fieldType) equal(ov, v reflect.Value) (r bool) {
|
||||
if ov == v {
|
||||
return true
|
||||
} else if !ov.IsValid() || !v.IsValid() {
|
||||
return false
|
||||
}
|
||||
if ft.Ptr {
|
||||
ov = ov.Elem()
|
||||
v = v.Elem()
|
||||
}
|
||||
if ov == v {
|
||||
return true
|
||||
} else if !ov.IsValid() || !v.IsValid() {
|
||||
return false
|
||||
}
|
||||
switch ft.Kind {
|
||||
case kindBytes:
|
||||
return bytes.Equal(ov.Bytes(), v.Bytes())
|
||||
case kindMap:
|
||||
on := ov.Len()
|
||||
n := v.Len()
|
||||
if on != n {
|
||||
return false
|
||||
}
|
||||
r := ov.MapRange()
|
||||
for r.Next() {
|
||||
vv := v.MapIndex(r.Key())
|
||||
if !vv.IsValid() || !ft.MapValue.equal(r.Value(), vv) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case kindSlice:
|
||||
on := ov.Len()
|
||||
n := v.Len()
|
||||
if on != n {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
if !ft.List.equal(ov.Index(i), v.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case kindTime:
|
||||
return ov.Interface().(time.Time).Equal(v.Interface().(time.Time))
|
||||
case kindBinaryMarshal:
|
||||
obuf, oerr := ov.Interface().(encoding.BinaryMarshaler).MarshalBinary()
|
||||
buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
|
||||
if oerr != nil || err != nil {
|
||||
return false // todo: should propagate error?
|
||||
}
|
||||
return bytes.Equal(obuf, buf)
|
||||
case kindStruct:
|
||||
for _, f := range ft.Fields {
|
||||
fov := ov.FieldByIndex(f.structField.Index)
|
||||
fv := v.FieldByIndex(f.structField.Index)
|
||||
if !f.Type.equal(fov, fv) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return ov.Interface() == v.Interface()
|
||||
}
|
568
vendor/github.com/mjl-/bstore/exec.go
generated
vendored
Normal file
568
vendor/github.com/mjl-/bstore/exec.go
generated
vendored
Normal file
@ -0,0 +1,568 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// exec represents the execution of a query plan.
type exec[T any] struct {
	q    *Query[T] // Query this execution belongs to; errors are recorded on it.
	plan *plan[T]  // The plan being executed.

	// For queries with explicit PKs filtered on.
	// See plan.keys. We remove items from the list when we looked one up, but we keep the slice non-nil.
	keys [][]byte

	// If -1, no limit is set. This is different from Query where 0 means
	// no limit. We count back and 0 means the end.
	limit int

	data []pair[T] // If not nil (even if empty), serve nextKey requests from here.

	// Buckets, lazily initialized on the first nextKey call.
	ib *bolt.Bucket // Index bucket, only set when plan.idx is non-nil.
	rb *bolt.Bucket // Records bucket.

	forward func() (bk, bv []byte) // Once we start scanning, we prepare forward to next/prev to the following value.
}
|
||||
|
||||
// exec creates a new execution for the plan, registering statistics.
|
||||
func (p *plan[T]) exec(q *Query[T]) *exec[T] {
|
||||
q.stats.Queries++
|
||||
if p.idx == nil {
|
||||
if p.keys != nil {
|
||||
q.stats.PlanPK++
|
||||
} else if p.start != nil || p.stop != nil {
|
||||
q.stats.PlanPKScan++
|
||||
} else {
|
||||
q.stats.PlanTableScan++
|
||||
}
|
||||
q.stats.LastIndex = ""
|
||||
} else {
|
||||
if p.keys != nil {
|
||||
q.stats.PlanUnique++
|
||||
} else {
|
||||
q.stats.PlanIndexScan++
|
||||
}
|
||||
q.stats.LastIndex = p.idx.Name
|
||||
}
|
||||
if len(p.orders) > 0 {
|
||||
q.stats.Sort++
|
||||
}
|
||||
q.stats.LastOrdered = p.start != nil || p.stop != nil
|
||||
q.stats.LastAsc = !p.desc
|
||||
|
||||
limit := -1
|
||||
if q.xlimit > 0 {
|
||||
limit = q.xlimit
|
||||
}
|
||||
return &exec[T]{q: q, plan: p, keys: p.keys, limit: limit}
|
||||
}
|
||||
|
||||
// incr treats buf as a bigendian number, increasing it by one in place. Used
// for reverse scans, where we must start beyond the key prefix we are looking
// for. Returns false when buf consisted entirely of 0xff bytes and wrapped
// around to all zeroes.
func incr(buf []byte) bool {
	for i := len(buf); i > 0; i-- {
		b := buf[i-1]
		if b != 255 {
			buf[i-1] = b + 1
			return true
		}
		// Carry into the next more-significant byte.
		buf[i-1] = 0
	}
	return false
}
|
||||
|
||||
// cutoff returns b unchanged when it holds at most n bytes, otherwise its
// first n bytes (a view, not a copy).
func cutoff(b []byte, n int) []byte {
	if len(b) > n {
		return b[:n]
	}
	return b
}
|
||||
|
||||
// nextKey returns the key and optionally value for the next selected record.
//
// ErrAbsent is returned if there is no more record.
//
// If an error occurs, an error is set on query, except in the case of
// ErrAbsent. ErrAbsent does not finish the query because a Delete or Update
// could follow.
func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
	var zero T

	q := e.q

	if q.err != nil {
		return nil, zero, q.err
	}

	// We collected & sorted data previously. Return from it until done.
	// Limit was already applied.
	if e.data != nil {
		if len(e.data) == 0 {
			return nil, zero, ErrAbsent
		}
		p := e.data[0]
		e.data = e.data[1:]
		var v T
		if value {
			var err error
			v, err = p.Value(e)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}
		return p.bk, v, nil
	}

	// Limit counted down to zero: nothing more to return.
	if e.limit == 0 {
		return nil, zero, ErrAbsent
	}

	// First time we are going to need buckets.
	if e.rb == nil {
		tx, err := q.tx(write)
		if err != nil {
			q.error(err)
			return nil, zero, err
		}
		e.rb, err = tx.recordsBucket(q.st.Name, q.st.Current.fillPercent)
		if err != nil {
			return nil, zero, err
		}
		if e.plan.idx != nil {
			e.ib, err = tx.indexBucket(e.plan.idx)
			if err != nil {
				return nil, zero, err
			}
		}
	}

	// List of IDs (records) or full unique index equality match.
	// We can get the records/index value by a simple "get" on the key.
	if e.keys != nil {
		// When ordering is requested we first collect all matches, sort,
		// then serve from e.data on the recursive restart below.
		collect := len(e.plan.orders) > 0
		if collect {
			e.data = []pair[T]{} // Must be non-nil to get into e.data branch!
		}
		for i, xk := range e.keys {
			var bk, bv []byte

			// For indices, we need to look up the PK through the index.
			if e.plan.idx != nil {
				c := e.ib.Cursor()
				q.stats.Index.Cursor++
				bki, _ := c.Seek(xk)
				if !bytes.HasPrefix(bki, xk) {
					continue
				}
				// log.Printf("seek %x, bki %x", xk, bki)
				// Index key is the match prefix followed by the PK.
				bk = bki[len(xk):]
			} else {
				bk = xk
			}

			// We don't need to fetch the full record now if it isn't needed by
			// caller. It may be fetched below for more filters.
			if value || e.plan.idx == nil {
				q.stats.Records.Get++
				bv = e.rb.Get(bk)
				if bv == nil {
					if e.plan.idx != nil {
						// Index points at a PK that does not exist: store corruption.
						return nil, zero, fmt.Errorf("%w: record with pk %x referenced through index %q not found", ErrStore, bk, e.plan.idx.Name)
					}
					continue
				}
			}
			p := pair[T]{bk, bv, nil}
			if ok, err := e.checkFilter(&p); err != nil {
				return nil, zero, err
			} else if !ok {
				continue
			}

			if collect {
				e.data = append(e.data, p)
				continue
			}

			// Again, only fetch value if needed.
			var v T
			if value {
				var err error
				v, err = p.Value(e)
				if err != nil {
					q.error(err)
					return nil, zero, err
				}
			}

			if e.limit > 0 {
				e.limit--
			}

			// Drop the keys we already served; remaining keys are picked up next call.
			e.keys = e.keys[i+1:]
			return bk, v, nil
		}
		if !collect {
			return nil, zero, ErrAbsent
		}
		// Restart, now with data.
		e.keys = [][]byte{}
		e.sort()
		if e.limit > 0 && len(e.data) > e.limit {
			e.data = e.data[:e.limit]
		}
		// NOTE(review): this restarts via q.nextKey (the Query wrapper) while
		// the scan branch below restarts via e.nextKey — presumably
		// equivalent; confirm against Query.nextKey.
		return q.nextKey(write, value)
	}

	// We are going to do a scan, either over the records or an index. We may have a start and stop key.
	collect := len(e.plan.orders) > 0
	if collect {
		e.data = []pair[T]{} // Must be non-nil to get into e.data branch on function restart.
	}
	for {
		var xk, xv []byte
		if e.forward == nil {
			// First time we are in this loop, we set up a cursor and e.forward.

			var c *bolt.Cursor
			var statsKV *StatsKV
			if e.plan.idx == nil {
				c = e.rb.Cursor()
				statsKV = &q.stats.Records
			} else {
				c = e.ib.Cursor()
				statsKV = &q.stats.Index
			}
			if !e.plan.desc {
				e.forward = c.Next
				if e.plan.start != nil {
					statsKV.Cursor++
					// If e.plan.start does not exist, seek will skip to the
					// next value after. Fine because this is ascending order.
					xk, xv = c.Seek(e.plan.start)
				} else {
					statsKV.Cursor++
					xk, xv = c.First()
				}
			} else {
				e.forward = c.Prev
				if e.plan.start == nil {
					statsKV.Cursor++
					xk, xv = c.Last()
				} else {
					// Descending with a start key: position just past the
					// start prefix, then walk back to the first match.
					start := make([]byte, len(e.plan.start))
					copy(start, e.plan.start)
					ok := incr(start)
					if !ok {
						statsKV.Cursor++
						// We were at the last representable value. So we simply start at the end.
						xk, xv = c.Last()
					} else {
						statsKV.Cursor++
						xk, xv = c.Seek(start)
						if xk == nil {
							statsKV.Cursor++
							xk, xv = c.Last()
						}
						// We started at the value after where we were requested to start, so we have to
						// move until we find a matching key.
						// todo: we could take e.plan.stop into account (if set). right now we may be
						// seeking all the way to the front without ever seeing a match to stop.
						for xk != nil && bytes.Compare(cutoff(xk, len(e.plan.start)), e.plan.start) > 0 {
							statsKV.Cursor++
							xk, xv = e.forward()
						}
					}
				}
			}
		} else {
			if e.plan.idx == nil {
				q.stats.Records.Cursor++
			} else {
				q.stats.Index.Cursor++
			}
			xk, xv = e.forward()
			// log.Printf("forwarded, %x %x", xk, xv)
		}

		// Cursor exhausted.
		if xk == nil {
			break
		}

		// Exclusive start: skip keys still matching the start prefix.
		if e.plan.start != nil && !e.plan.startInclusive && bytes.HasPrefix(xk, e.plan.start) {
			continue
		}
		if e.plan.stop != nil {
			// Stop scanning once past the stop key, direction-aware.
			cmp := bytes.Compare(cutoff(xk, len(e.plan.stop)), e.plan.stop)
			if !e.plan.desc && (e.plan.stopInclusive && cmp > 0 || !e.plan.stopInclusive && cmp >= 0) {
				break
			} else if e.plan.desc && (e.plan.stopInclusive && cmp < 0 || !e.plan.stopInclusive && cmp <= 0) {
				break
			}
		}

		var pk, bv []byte
		if e.plan.idx == nil {
			pk = xk
			bv = xv
		} else {
			// Index scan: extract the PK from the index key; the record
			// value is fetched lazily via pair.Value when needed.
			var err error
			pk, _, err = e.plan.idx.parseKey(xk, false)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}

		p := pair[T]{pk, bv, nil}
		if ok, err := e.checkFilter(&p); err != nil {
			return nil, zero, err
		} else if !ok {
			continue
		}
		//log.Printf("have kv, %x %x", p.bk, p.bv)
		var v T
		var err error
		if value {
			v, err = p.Value(e)
			if err != nil {
				q.error(err)
				return nil, zero, err
			}
		}
		if collect {
			e.data = append(e.data, p)
			continue
		}
		if e.limit > 0 {
			e.limit--
		}
		return p.bk, v, nil
	}
	if !collect {
		return nil, zero, ErrAbsent
	}
	// Restart, now with data.
	e.sort()
	if e.limit > 0 && len(e.data) > e.limit {
		e.data = e.data[:e.limit]
	}
	return e.nextKey(write, value)
}
|
||||
|
||||
// checkFilter checks p against the filters for the plan, reporting whether
// the record passes all of them. A bare "return" uses the named results and
// means (false, nil): filtered out without error.
func (e *exec[T]) checkFilter(p *pair[T]) (rok bool, rerr error) {
	q := e.q

	for _, ff := range e.plan.filters {
		switch f := ff.(type) {
		// note: filterIDs is not here, it is handled earlier to fetch records.
		case filterFn[T]:
			// Arbitrary user-supplied predicate over the parsed value.
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			if !f.fn(v) {
				return
			}
		case filterEqual[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			if !f.field.Type.equal(frv, f.rvalue) {
				return
			}
		case filterNotEqual[T]:
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			if f.field.Type.equal(frv, f.rvalue) {
				return
			}
		case filterIn[T]:
			// Field must equal at least one of the candidate values.
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			var have bool
			for _, xrv := range f.rvalues {
				if f.field.Type.equal(frv, xrv) {
					have = true
					break
				}
			}
			if !have {
				return
			}
		case filterNotIn[T]:
			// Field must equal none of the candidate values.
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			frv := rv.FieldByIndex(f.field.structField.Index)
			for _, xrv := range f.rvalues {
				if f.field.Type.equal(frv, xrv) {
					return
				}
			}
		case filterCompare[T]:
			// Ordered comparison (greater/less, optionally with equal).
			v, err := p.Value(e)
			if err != nil {
				q.error(err)
				return false, err
			}
			rv := reflect.ValueOf(v)
			fv := rv.FieldByIndex(f.field.structField.Index)
			cmp := compare(f.field.Type.Kind, fv, f.value)
			switch {
			case cmp == 0 && (f.op == opGreaterEqual || f.op == opLessEqual):
			case cmp < 0 && (f.op == opLess || f.op == opLessEqual):
			case cmp > 0 && (f.op == opGreater || f.op == opGreaterEqual):
			default:
				return
			}
		default:
			q.errorf("internal error: missing case for filter %T", ff)
			return false, q.err
		}
	}
	return true, nil
}
|
||||
|
||||
// if type can be compared for filterCompare, eg for greater/less comparison.
|
||||
func comparable(ft fieldType) bool {
|
||||
if ft.Ptr {
|
||||
return false
|
||||
}
|
||||
switch ft.Kind {
|
||||
case kindBytes, kindString, kindBool, kindInt8, kindInt16, kindInt32, kindInt64, kindInt, kindUint8, kindUint16, kindUint32, kindUint64, kindUint, kindFloat32, kindFloat64, kindTime:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func compare(k kind, a, b reflect.Value) int {
|
||||
switch k {
|
||||
case kindBytes:
|
||||
return bytes.Compare(a.Bytes(), b.Bytes())
|
||||
|
||||
case kindString:
|
||||
sa := a.String()
|
||||
sb := b.String()
|
||||
if sa < sb {
|
||||
return -1
|
||||
} else if sa > sb {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
|
||||
case kindBool:
|
||||
ba := a.Bool()
|
||||
bb := b.Bool()
|
||||
if !ba && bb {
|
||||
return -1
|
||||
} else if ba && !bb {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
|
||||
case kindInt8, kindInt16, kindInt32, kindInt64, kindInt:
|
||||
ia := a.Int()
|
||||
ib := b.Int()
|
||||
if ia < ib {
|
||||
return -1
|
||||
} else if ia > ib {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
|
||||
case kindUint8, kindUint16, kindUint32, kindUint64, kindUint:
|
||||
ia := a.Uint()
|
||||
ib := b.Uint()
|
||||
if ia < ib {
|
||||
return -1
|
||||
} else if ia > ib {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
|
||||
case kindFloat32, kindFloat64:
|
||||
fa := a.Float()
|
||||
fb := b.Float()
|
||||
if fa < fb {
|
||||
return -1
|
||||
} else if fa > fb {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
|
||||
case kindTime:
|
||||
ta := a.Interface().(time.Time)
|
||||
tb := b.Interface().(time.Time)
|
||||
if ta.Before(tb) {
|
||||
return -1
|
||||
} else if ta.After(tb) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
// todo: internal error, cannot happen
|
||||
return 0
|
||||
}
|
||||
|
||||
func (e *exec[T]) sort() {
|
||||
// todo: We should check whether we actually need to load values. We're just
|
||||
// always it now for the time being because SortStableFunc isn't going to
|
||||
// give us a *pair (even though it could because of the slice) so we
|
||||
// couldn't set/cache the value T during sorting.
|
||||
q := e.q
|
||||
|
||||
for i := range e.data {
|
||||
p := &e.data[i]
|
||||
if p.value != nil {
|
||||
continue
|
||||
}
|
||||
_, err := p.Value(e)
|
||||
if err != nil {
|
||||
q.error(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sort.SliceStable(e.data, func(i, j int) bool {
|
||||
a := e.data[i]
|
||||
b := e.data[j]
|
||||
for _, o := range e.plan.orders {
|
||||
ra := reflect.ValueOf(*a.value)
|
||||
rb := reflect.ValueOf(*b.value)
|
||||
rva := ra.FieldByIndex(o.field.structField.Index)
|
||||
rvb := rb.FieldByIndex(o.field.structField.Index)
|
||||
cmp := compare(o.field.Type.Kind, rva, rvb)
|
||||
if cmp == 0 {
|
||||
continue
|
||||
}
|
||||
return cmp < 0 && o.asc || cmp > 0 && !o.asc
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
387
vendor/github.com/mjl-/bstore/export.go
generated
vendored
Normal file
387
vendor/github.com/mjl-/bstore/export.go
generated
vendored
Normal file
@ -0,0 +1,387 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// Types returns the types present in the database, regardless of whether they
|
||||
// are currently registered using Open or Register. Useful for exporting data
|
||||
// with Keys and Records.
|
||||
func (db *DB) Types() ([]string, error) {
|
||||
var types []string
|
||||
err := db.Read(func(tx *Tx) error {
|
||||
return tx.btx.ForEach(func(bname []byte, b *bolt.Bucket) error {
|
||||
// note: we do not track stats for types operations.
|
||||
|
||||
types = append(types, string(bname))
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types, nil
|
||||
}
|
||||
|
||||
// prepareType prepares typeName for export/introspection with DB.Keys,
|
||||
// DB.Record, DB.Records. It is different in that it does not require a
|
||||
// reflect.Type to parse into. It parses to a map, e.g. for export to JSON. The
|
||||
// returned typeVersion has no structFields set in its fields.
|
||||
func (db *DB) prepareType(tx *Tx, typeName string) (map[uint32]*typeVersion, *typeVersion, *bolt.Bucket, []string, error) {
|
||||
rb, err := tx.recordsBucket(typeName, 0.5)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
tb, err := tx.bucket(bucketKey{typeName, "types"})
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
versions := map[uint32]*typeVersion{}
|
||||
var tv *typeVersion
|
||||
err = tb.ForEach(func(bk, bv []byte) error {
|
||||
// note: we do not track stats for types operations.
|
||||
|
||||
ntv, err := parseSchema(bk, bv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
versions[ntv.Version] = ntv
|
||||
if tv == nil || ntv.Version > tv.Version {
|
||||
tv = ntv
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
if tv == nil {
|
||||
return nil, nil, nil, nil, fmt.Errorf("%w: no type versions", ErrStore)
|
||||
}
|
||||
fields := make([]string, len(tv.Fields))
|
||||
for i, f := range tv.Fields {
|
||||
fields[i] = f.Name
|
||||
}
|
||||
return versions, tv, rb, fields, nil
|
||||
}
|
||||
|
||||
// Keys returns the parsed primary keys for the type "typeName". The type does
|
||||
// not have to be registered with Open or Register. For use with Record(s) to
|
||||
// export data.
|
||||
func (db *DB) Keys(typeName string, fn func(pk any) error) error {
|
||||
return db.Read(func(tx *Tx) error {
|
||||
_, tv, rb, _, err := db.prepareType(tx, typeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// todo: do not pass nil parser?
|
||||
v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(nil))).Elem()
|
||||
return rb.ForEach(func(bk, bv []byte) error {
|
||||
tx.stats.Records.Cursor++
|
||||
|
||||
if err := parsePK(v, bk); err != nil {
|
||||
return err
|
||||
}
|
||||
return fn(v.Interface())
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Record returns the record with primary "key" for "typeName" parsed as map.
// "Fields" is set to the fields of the type. The type does not have to be
// registered with Open or Register. Record parses the data without the Go
// type present. BinaryMarshal fields are returned as bytes.
func (db *DB) Record(typeName, key string, fields *[]string) (map[string]any, error) {
	var r map[string]any
	err := db.Read(func(tx *Tx) error {
		versions, tv, rb, xfields, err := db.prepareType(tx, typeName)
		if err != nil {
			return err
		}
		*fields = xfields

		// Parse the key string into a Go value of the primary key's kind.
		// int/uint are parsed with 32-bit limits: stored int/uint must fit
		// in 32 bits so data is portable to 32-bit platforms.
		var kv any
		switch tv.Fields[0].Type.Kind {
		case kindBool:
			switch key {
			case "true":
				kv = true
			case "false":
				kv = false
			default:
				err = fmt.Errorf("%w: invalid bool %q", ErrParam, key)
			}
		case kindInt8:
			kv, err = strconv.ParseInt(key, 10, 8)
		case kindInt16:
			kv, err = strconv.ParseInt(key, 10, 16)
		case kindInt32:
			kv, err = strconv.ParseInt(key, 10, 32)
		case kindInt:
			kv, err = strconv.ParseInt(key, 10, 32)
		case kindInt64:
			kv, err = strconv.ParseInt(key, 10, 64)
		case kindUint8:
			kv, err = strconv.ParseUint(key, 10, 8)
		case kindUint16:
			kv, err = strconv.ParseUint(key, 10, 16)
		case kindUint32:
			kv, err = strconv.ParseUint(key, 10, 32)
		case kindUint:
			kv, err = strconv.ParseUint(key, 10, 32)
		case kindUint64:
			kv, err = strconv.ParseUint(key, 10, 64)
		case kindString:
			kv = key
		case kindBytes:
			kv = []byte(key) // todo: or decode from base64?
		default:
			return fmt.Errorf("internal error: unknown primary key kind %v", tv.Fields[0].Type.Kind)
		}
		if err != nil {
			return err
		}
		pkv := reflect.ValueOf(kv)
		kind, err := typeKind(pkv.Type())
		if err != nil {
			return err
		}
		if kind != tv.Fields[0].Type.Kind {
			// Convert from various int types above to required type. The ParseInt/ParseUint
			// calls already validated that the values fit.
			pkt := reflect.TypeOf(tv.Fields[0].Type.zero(nil))
			pkv = pkv.Convert(pkt)
		}
		// Pack into the fixed-width key encoding used in the records bucket.
		k, err := packPK(pkv)
		if err != nil {
			return err
		}

		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		record, err := parseMap(versions, k, bv)
		if err != nil {
			return err
		}
		r = record
		return nil
	})
	return r, err
}
|
||||
|
||||
// Records calls "fn" for each record of "typeName". Records sets "fields" to
|
||||
// the fields of the type. The type does not have to be registered with Open or
|
||||
// Register. Record parses the data without the Go type present. BinaryMarshal
|
||||
// fields are returned as bytes.
|
||||
func (db *DB) Records(typeName string, fields *[]string, fn func(map[string]any) error) error {
|
||||
return db.Read(func(tx *Tx) error {
|
||||
versions, _, rb, xfields, err := db.prepareType(tx, typeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*fields = xfields
|
||||
|
||||
return rb.ForEach(func(bk, bv []byte) error {
|
||||
tx.stats.Records.Cursor++
|
||||
|
||||
record, err := parseMap(versions, bk, bv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fn(record)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// parseMap parses a record into a map with the right typeVersion from versions.
// bk is the bucket key (the packed primary key), bv the stored record bytes.
// Parse errors inside field decoding surface as parseErr panics (raised by
// parser.Errorf) and are converted to a returned error by the deferred recover.
func parseMap(versions map[uint32]*typeVersion, bk, bv []byte) (record map[string]any, rerr error) {
	p := &parser{buf: bv, orig: bv}
	var version uint32

	defer func() {
		x := recover()
		if x == nil {
			return
		}
		// Only parseErr panics are translated; anything else is a real bug
		// and is re-raised.
		if err, ok := x.(parseErr); ok {
			rerr = fmt.Errorf("%w (version %d, buf %x orig %x)", err.err, version, p.buf, p.orig)
			return
		}
		panic(x)
	}()

	// The record starts with the uvarint schema version it was written with.
	version = uint32(p.Uvarint())
	tv := versions[version]
	if tv == nil {
		return nil, fmt.Errorf("%w: unknown type version %d", ErrStore, version)
	}

	r := map[string]any{}

	// The primary key (field 0) is not in the record data; parse it from bk.
	v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(p))).Elem()
	err := parsePK(v, bk)
	if err != nil {
		return nil, err
	}
	r[tv.Fields[0].Name] = v.Interface()

	// todo: Should we be looking at the most recent tv, and hiding fields
	// that have been removed in a later typeVersion? Like we do for real
	// parsing into reflect value?
	// A fieldmap bit per non-PK field says whether a value follows or the
	// field is the zero value.
	fm := p.Fieldmap(len(tv.Fields) - 1)
	for i, f := range tv.Fields[1:] {
		if fm.Nonzero(i) {
			r[f.Name] = f.Type.parseValue(p)
		} else {
			r[f.Name] = f.Type.zero(p)
		}
	}

	// All bytes must be consumed, otherwise the record is corrupt.
	if len(p.buf) != 0 {
		return nil, fmt.Errorf("%w: leftover data after parsing", ErrStore)
	}

	return r, nil
}
|
||||
|
||||
func (ft fieldType) parseValue(p *parser) any {
|
||||
switch ft.Kind {
|
||||
case kindBytes:
|
||||
return p.TakeBytes(false)
|
||||
case kindBinaryMarshal:
|
||||
// We don't have the type available, so we just return the binary data.
|
||||
return p.TakeBytes(false)
|
||||
case kindBool:
|
||||
return true
|
||||
case kindInt8:
|
||||
return int8(p.Varint())
|
||||
case kindInt16:
|
||||
return int16(p.Varint())
|
||||
case kindInt32:
|
||||
return int32(p.Varint())
|
||||
case kindInt:
|
||||
i := p.Varint()
|
||||
if i < math.MinInt32 || i > math.MaxInt32 {
|
||||
p.Errorf("%w: int %d does not fit in int32", ErrStore, i)
|
||||
}
|
||||
return int(i)
|
||||
case kindInt64:
|
||||
return p.Varint()
|
||||
case kindUint8:
|
||||
return uint8(p.Uvarint())
|
||||
case kindUint16:
|
||||
return uint16(p.Uvarint())
|
||||
case kindUint32:
|
||||
return uint32(p.Uvarint())
|
||||
case kindUint:
|
||||
i := p.Uvarint()
|
||||
if i > math.MaxUint32 {
|
||||
p.Errorf("%w: uint %d does not fit in uint32", ErrStore, i)
|
||||
}
|
||||
return uint(i)
|
||||
case kindUint64:
|
||||
return p.Uvarint()
|
||||
case kindFloat32:
|
||||
return math.Float32frombits(uint32(p.Uvarint()))
|
||||
case kindFloat64:
|
||||
return math.Float64frombits(p.Uvarint())
|
||||
case kindString:
|
||||
return string(p.TakeBytes(false))
|
||||
case kindTime:
|
||||
var t time.Time
|
||||
err := t.UnmarshalBinary(p.TakeBytes(false))
|
||||
if err != nil {
|
||||
p.Errorf("%w: parsing time: %v", ErrStore, err)
|
||||
}
|
||||
return t
|
||||
case kindSlice:
|
||||
un := p.Uvarint()
|
||||
n := p.checkInt(un)
|
||||
fm := p.Fieldmap(n)
|
||||
var l []any
|
||||
for i := 0; i < n; i++ {
|
||||
if fm.Nonzero(i) {
|
||||
l = append(l, ft.List.parseValue(p))
|
||||
} else {
|
||||
// Always add non-zero elements, or we would
|
||||
// change the number of elements in a list.
|
||||
l = append(l, ft.List.zero(p))
|
||||
}
|
||||
}
|
||||
return l
|
||||
case kindMap:
|
||||
un := p.Uvarint()
|
||||
n := p.checkInt(un)
|
||||
fm := p.Fieldmap(n)
|
||||
m := map[string]any{}
|
||||
for i := 0; i < n; i++ {
|
||||
// Converting to string can be ugly, but the best we can do.
|
||||
k := fmt.Sprintf("%v", ft.MapKey.parseValue(p))
|
||||
if _, ok := m[k]; ok {
|
||||
return fmt.Errorf("%w: duplicate key %q in map", ErrStore, k)
|
||||
}
|
||||
var v any
|
||||
if fm.Nonzero(i) {
|
||||
v = ft.MapValue.parseValue(p)
|
||||
} else {
|
||||
v = ft.MapValue.zero(p)
|
||||
}
|
||||
m[k] = v
|
||||
}
|
||||
return m
|
||||
case kindStruct:
|
||||
fm := p.Fieldmap(len(ft.Fields))
|
||||
m := map[string]any{}
|
||||
for i, f := range ft.Fields {
|
||||
if fm.Nonzero(i) {
|
||||
m[f.Name] = f.Type.parseValue(p)
|
||||
} else {
|
||||
m[f.Name] = f.Type.zero(p)
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
p.Errorf("internal error: unhandled field type %v", ft.Kind)
|
||||
panic("cannot happen")
|
||||
}
|
||||
|
||||
// zerovalues maps each field kind to the generic Go zero value that
// fieldType.zero returns for fields marked zero in a record's fieldmap.
var zerovalues = map[kind]any{
	kindBytes:         []byte(nil),
	kindBinaryMarshal: []byte(nil), // We don't have the actual type available, so we just return binary data.
	kindBool:          false,
	kindInt8:          int8(0),
	kindInt16:         int16(0),
	kindInt32:         int32(0),
	kindInt:           int(0),
	kindInt64:         int64(0),
	kindUint8:         uint8(0),
	kindUint16:        uint16(0),
	kindUint32:        uint32(0),
	kindUint:          uint(0),
	kindUint64:        uint64(0),
	kindFloat32:       float32(0),
	kindFloat64:       float64(0),
	kindString:        "",
	kindTime:          zerotime,
	kindSlice:         []any(nil),
	kindMap:           map[string]any(nil),
	kindStruct:        map[string]any(nil),
}
|
||||
|
||||
func (ft fieldType) zero(p *parser) any {
|
||||
v, ok := zerovalues[ft.Kind]
|
||||
if !ok {
|
||||
p.Errorf("internal error: unhandled zero value for field type %v", ft.Kind)
|
||||
}
|
||||
return v
|
||||
}
|
78
vendor/github.com/mjl-/bstore/format.md
generated
vendored
Normal file
78
vendor/github.com/mjl-/bstore/format.md
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
# Types
|
||||
|
||||
Each Go type is stored in its own bucket, after its name. Only subbuckets are
|
||||
created directly below a type bucket, no key/values. Two subbuckets are always
|
||||
created: "records" for the data, "types" for the type definitions. Each index
|
||||
is stored in a subbucket named "index." followed by the name. Unique and
|
||||
non-unique indices use the same encoding.
|
||||
|
||||
# Type versions
|
||||
|
||||
Type definitions are stored in the "types" subbucket. The key is a 4 byte
|
||||
uint32, a version as referenced from a data record. The value is a JSON-encoded
|
||||
representation of the typeVersion struct.
|
||||
|
||||
When a new Go type or changed Go type is registered with a database, a new type
|
||||
version is added to the "types" subbucket. Data is always inserted/updated with
|
||||
the most recent type version. But the database may still hold data records
|
||||
referencing older type versions. Bstore decodes a packed data record with the
|
||||
referenced type version. For storage efficiency: the type version is reused for
|
||||
many stored records, a self-describing format (like JSON) would duplicate the
|
||||
field names in each stored record.
|
||||
|
||||
# Record storage
|
||||
|
||||
Primary keys of types are used as BoltDB keys and can be of bool, integer
|
||||
types, strings or byte slices. Floats, time, struct, slice, map, binarymarshal
|
||||
cannot be stored as primary key. Bools are stored as a single byte 0 or 1.
|
||||
Integers are stored in their fixed width encoding (eg 4 bytes for 32 bit int).
|
||||
Signed integers are stored so the fixed-width byte value is ordered for all
|
||||
signed values, i.e. math.MinInt32 is stored as 4 bytes bigendian with value 0.
|
||||
For strings and byte slices, only their bytes are stored.
|
||||
|
||||
The value stored with a BoltDB key starts with a uvarint "version" of the type.
|
||||
This refers to a version in the "types" bucket. The primary key is not encoded
|
||||
again in the data record itself. The remaining fields are space-efficiently
|
||||
encoded.
|
||||
|
||||
After the uvarint version follow as many bytes to fit a bitmap for the direct
|
||||
struct fields in the type description. Each bit indicates if the value is
|
||||
nonzero and present in the value that follows. Only non-zero values take up
|
||||
more space than the single bit and are stored consecutively after the fieldmap:
|
||||
|
||||
- Pointers are stored as their non-pointer value. If the pointer is nil, it
|
||||
is zero in the fieldmap.
|
||||
- If the underlying type is a signed int or float, or unsigned int, then
|
||||
varint/uvarint encoding from encoding/binary is used.
|
||||
- If the underlying type is a string or []byte, uvarint count followed by the
|
||||
bytes.
|
||||
- If the underlying type is a bool, the value is always true and no
|
||||
additional data is present to represent the value. False is represented by
|
||||
the zero value marked in the fieldmap.
|
||||
- Slices use a uvarint for the number of elements, followed by a bitmap for
|
||||
nonzero values, followed by the encoded nonzero elements.
|
||||
- Maps use a uvarint for the number of key/value pairs, followed by a
|
||||
fieldmap for the values (the keys are always present), followed by each
|
||||
pair: key (always present), value (only if nonzero); key, value; etc.
|
||||
- If a type is an encoding.BinaryUnmarshaler and encoding.BinaryMarshaler,
|
||||
then its bytes are stored prefixed with its uvarint length.
|
||||
- If the type is a struct, its fields are encoded with a field map followed
|
||||
by its nonzero field values.
|
||||
- Other types cannot be represented currently.
|
||||
|
||||
In a new type version, the type of a field can be changed as long as existing
|
||||
records can be decoded into the new Go type. E.g. you can change an int32 into
|
||||
an int64. You can only change an int64 into an int32 if all values you attempt to
|
||||
read are small enough to fit in an int32. You cannot change between signed and
|
||||
unsigned integer, or between string and []byte.
|
||||
|
||||
# Index storage
|
||||
|
||||
Indexes are stored in subbuckets, named starting with "index." followed by the
|
||||
index name. Keys are a self-delimiting encodings of the fields that make up the
|
||||
key, followed by the primary key for the "records" bucket. Values are always
|
||||
empty in index buckets. For bool and integer types, the same fixed-width
|
||||
encoding as for primary keys in the "records" subbucket is used. Strings are
|
||||
encoded by their bytes (no \0 allowed) followed by a delimiting \0. Unlike
|
||||
primary keys, an index can cover a field with type time.Time. Times are encoded
|
||||
with 8 byte seconds followed by the remaining 4 bytes nanoseconds.
|
13
vendor/github.com/mjl-/bstore/gendoc.sh
generated
vendored
Normal file
13
vendor/github.com/mjl-/bstore/gendoc.sh
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
#!/bin/sh
# Generate cmd/bstore/doc.go: a package doc comment for the bstore command
# that embeds the command's usage output as its subcommand documentation.
(
cat <<EOF
/*
Command bstore provides commands for inspecting a bstore database.

Subcommands:

EOF
# Running without arguments prints usage to stderr; indent it for godoc
# formatting and drop the "exit status" line emitted by "go run".
go run cmd/bstore/bstore.go 2>&1 | sed 's/^/ /' | grep -v 'exit status'
echo '*/'
echo 'package main'
) >cmd/bstore/doc.go
|
282
vendor/github.com/mjl-/bstore/keys.go
generated
vendored
Normal file
282
vendor/github.com/mjl-/bstore/keys.go
generated
vendored
Normal file
@ -0,0 +1,282 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
The records buckets map a primary key to the record data. The primary key is of
|
||||
a form that we can scan/range over. So fixed width for integers. For strings and
|
||||
bytes they are just their byte representation. We do not store the PK in the
|
||||
record data. This means we cannot store a time.Time as primary key, because we
|
||||
cannot have the timezone encoded for comparison reasons.
|
||||
|
||||
Index keys are similar to PK's. Unique and non-unique indices are encoded the
|
||||
same. The stored values are always empty, the key consists of the field values
|
||||
the index was created for, followed by the PK. The encoding of a field is nearly
|
||||
the same as the encoding of that type as a primary key. The differences: strings
|
||||
end with a \0 to make them self-delimiting; byte slices are not allowed because
|
||||
they are not self-delimiting; time.Time is allowed because the time is available
|
||||
in full (with timezone) in the record data.
|
||||
*/
|
||||
|
||||
// packPK returns the PK bytes representation for the PK value rv.
// Integers use fixed-width big-endian encoding. Signed integers are offset
// by their type's minimum value so the encoded bytes sort in value order
// (e.g. math.MinInt32 becomes 4 zero bytes). Strings and byte slices are
// stored as their raw bytes. int/uint must fit in 32 bits so data stays
// portable to 32-bit platforms.
func packPK(rv reflect.Value) ([]byte, error) {
	kv := rv.Interface()
	var buf []byte
	switch k := kv.(type) {
	case string:
		buf = []byte(k)
	case []byte:
		buf = k
	case bool:
		var b byte
		if k {
			b = 1
		}
		buf = []byte{b}
	case int8:
		// k + math.MinInt8 wraps, mapping [-128,127] onto byte values [0,255].
		buf = []byte{byte(uint8(k + math.MinInt8))}
	case int16:
		buf = binary.BigEndian.AppendUint16(nil, uint16(k+math.MinInt16))
	case int32:
		buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
	case int:
		if k < math.MinInt32 || k > math.MaxInt32 {
			return nil, fmt.Errorf("%w: int %d does not fit in int32", ErrParam, k)
		}
		buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
	case int64:
		buf = binary.BigEndian.AppendUint64(nil, uint64(k+math.MinInt64))
	case uint8:
		buf = []byte{k}
	case uint16:
		buf = binary.BigEndian.AppendUint16(nil, k)
	case uint32:
		buf = binary.BigEndian.AppendUint32(nil, k)
	case uint:
		if k > math.MaxUint32 {
			return nil, fmt.Errorf("%w: uint %d does not fit in uint32", ErrParam, k)
		}
		buf = binary.BigEndian.AppendUint32(nil, uint32(k))
	case uint64:
		buf = binary.BigEndian.AppendUint64(nil, k)
	default:
		return nil, fmt.Errorf("%w: unsupported primary key type %T", ErrType, kv)
	}
	return buf, nil
}
|
||||
|
||||
// parsePK parses primary key bk into rv.
|
||||
func parsePK(rv reflect.Value, bk []byte) error {
|
||||
k, err := typeKind(rv.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch k {
|
||||
case kindBytes:
|
||||
buf := make([]byte, len(bk))
|
||||
copy(buf, bk)
|
||||
rv.SetBytes(buf)
|
||||
return nil
|
||||
case kindString:
|
||||
rv.SetString(string(bk))
|
||||
return nil
|
||||
}
|
||||
|
||||
var need int
|
||||
switch k {
|
||||
case kindBool, kindInt8, kindUint8:
|
||||
need = 1
|
||||
case kindInt16, kindUint16:
|
||||
need = 2
|
||||
case kindInt32, kindUint32, kindInt, kindUint:
|
||||
need = 4
|
||||
case kindInt64, kindUint64:
|
||||
need = 8
|
||||
}
|
||||
if len(bk) != need {
|
||||
return fmt.Errorf("%w: got %d bytes for PK, need %d", ErrStore, len(bk), need)
|
||||
}
|
||||
|
||||
switch k {
|
||||
case kindBool:
|
||||
rv.SetBool(bk[0] != 0)
|
||||
case kindInt8:
|
||||
rv.SetInt(int64(int8(bk[0]) - math.MinInt8))
|
||||
case kindInt16:
|
||||
rv.SetInt(int64(int16(binary.BigEndian.Uint16(bk)) - math.MinInt16))
|
||||
case kindInt32, kindInt:
|
||||
rv.SetInt(int64(int32(binary.BigEndian.Uint32(bk)) - math.MinInt32))
|
||||
case kindInt64:
|
||||
rv.SetInt(int64(int64(binary.BigEndian.Uint64(bk)) - math.MinInt64))
|
||||
case kindUint8:
|
||||
rv.SetUint(uint64(bk[0]))
|
||||
case kindUint16:
|
||||
rv.SetUint(uint64(binary.BigEndian.Uint16(bk)))
|
||||
case kindUint32, kindUint:
|
||||
rv.SetUint(uint64(binary.BigEndian.Uint32(bk)))
|
||||
case kindUint64:
|
||||
rv.SetUint(uint64(binary.BigEndian.Uint64(bk)))
|
||||
default:
|
||||
// note: we cannot have kindTime as primary key at the moment.
|
||||
return fmt.Errorf("%w: unsupported primary key type %v", ErrType, rv.Type())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseKey parses the PK (last element) of an index key.
// If all is set, also gathers the values before and returns them in the second
// parameter.
func (idx *index) parseKey(buf []byte, all bool) ([]byte, [][]byte, error) {
	var err error
	var keys [][]byte
	// take consumes n bytes from buf, recording the consumed slice in keys
	// when gathering all values. A too-short buffer latches err.
	take := func(n int) {
		if len(buf) < n {
			err = fmt.Errorf("%w: not enough bytes in index key", ErrStore)
			return
		}
		if all {
			keys = append(keys, buf[:n])
		}
		buf = buf[n:]
	}
fields:
	for _, f := range idx.Fields {
		if err != nil {
			break
		}
		switch f.Type.Kind {
		case kindString:
			// Strings are self-delimiting via a trailing \0 (the string
			// bytes themselves may not contain \0).
			for i, b := range buf {
				if b == 0 {
					if all {
						keys = append(keys, buf[:i])
					}
					buf = buf[i+1:]
					continue fields
				}
			}
			err = fmt.Errorf("%w: bad string without 0 in index key", ErrStore)
		case kindBool:
			take(1)
		case kindInt8, kindUint8:
			take(1)
		case kindInt16, kindUint16:
			take(2)
		case kindInt32, kindUint32, kindInt, kindUint:
			take(4)
		case kindInt64, kindUint64:
			take(8)
		case kindTime:
			// 8 bytes seconds followed by 4 bytes nanoseconds.
			take(8 + 4)
		}
	}
	if err != nil {
		return nil, nil, err
	}

	// The remainder is the primary key. For fixed-width PK kinds, consume it
	// to validate its length (and record it in keys when all is set).
	pk := buf

	switch idx.tv.Fields[0].Type.Kind {
	case kindBool:
		take(1)
	case kindInt8, kindUint8:
		take(1)
	case kindInt16, kindUint16:
		take(2)
	case kindInt32, kindInt, kindUint32, kindUint:
		take(4)
	case kindInt64, kindUint64:
		take(8)
	}
	// NOTE(review): for string/bytes PKs no take() runs above, so pk equals
	// buf and this check is skipped; for fixed-width PKs it rejects trailing
	// bytes after the PK — confirm this is the intended semantics.
	if len(pk) != len(buf) && len(buf) != 0 {
		return nil, nil, fmt.Errorf("%w: leftover bytes in index key (%x)", ErrStore, buf)
	}
	if all {
		// take() also appended the PK itself to keys; drop it so only the
		// index field values are returned.
		// NOTE(review): if the PK kind is string/bytes, nothing was appended
		// for the PK and this would drop a field value — verify callers only
		// use all=true with fixed-width PKs.
		return pk, keys[:len(keys)-1], nil
	}
	return pk, nil, nil
}
|
||||
|
||||
// packKey returns a key to store in an index: first the prefix without pk, then
|
||||
// the prefix including pk.
|
||||
func (idx *index) packKey(rv reflect.Value, pk []byte) ([]byte, []byte, error) {
|
||||
var l []reflect.Value
|
||||
for _, f := range idx.Fields {
|
||||
frv := rv.FieldByIndex(f.structField.Index)
|
||||
l = append(l, frv)
|
||||
}
|
||||
return packIndexKeys(l, pk)
|
||||
}
|
||||
|
||||
// packIndexKeys packs values from l, followed by the pk.
// It returns the key prefix (without pk), and full key with pk.
// Field encodings match primary key encoding (fixed-width big-endian,
// signed values offset by the type minimum so bytes sort in value order),
// except: strings get a trailing \0 delimiter (and may not contain \0),
// and time.Time is allowed (8 bytes seconds + 4 bytes nanoseconds).
func packIndexKeys(l []reflect.Value, pk []byte) ([]byte, []byte, error) {
	var prek, ik []byte
	for _, frv := range l {
		k, err := typeKind(frv.Type())
		if err != nil {
			return nil, nil, err
		}
		var buf []byte
		switch k {
		case kindBool:
			buf = []byte{0}
			if frv.Bool() {
				buf[0] = 1
			}
		case kindInt8:
			buf = []byte{byte(int8(frv.Int()) + math.MinInt8)}
		case kindInt16:
			buf = binary.BigEndian.AppendUint16(nil, uint16(int16(frv.Int())+math.MinInt16))
		case kindInt32:
			buf = binary.BigEndian.AppendUint32(nil, uint32(int32(frv.Int())+math.MinInt32))
		case kindInt:
			// int must fit in 32 bits for portability to 32-bit platforms.
			i := frv.Int()
			if i < math.MinInt32 || i > math.MaxInt32 {
				return nil, nil, fmt.Errorf("%w: int value %d does not fit in int32", ErrParam, i)
			}
			buf = binary.BigEndian.AppendUint32(nil, uint32(int32(i)+math.MinInt32))
		case kindInt64:
			buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Int()+math.MinInt64))
		case kindUint8:
			buf = []byte{byte(frv.Uint())}
		case kindUint16:
			buf = binary.BigEndian.AppendUint16(nil, uint16(frv.Uint()))
		case kindUint32:
			buf = binary.BigEndian.AppendUint32(nil, uint32(frv.Uint()))
		case kindUint:
			i := frv.Uint()
			if i > math.MaxUint32 {
				return nil, nil, fmt.Errorf("%w: uint value %d does not fit in uint32", ErrParam, i)
			}
			buf = binary.BigEndian.AppendUint32(nil, uint32(i))
		case kindUint64:
			buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Uint()))
		case kindString:
			// The \0 delimiter makes the string self-delimiting; reject
			// strings containing \0.
			buf = []byte(frv.String())
			for _, c := range buf {
				if c == 0 {
					return nil, nil, fmt.Errorf("%w: string used as index key cannot have \\0", ErrParam)
				}
			}
			buf = append(buf, 0)
		case kindTime:
			// Seconds offset by math.MinInt64 (wrapping int64 addition) so
			// the big-endian bytes sort chronologically, then nanoseconds.
			tm := frv.Interface().(time.Time)
			buf = binary.BigEndian.AppendUint64(nil, uint64(tm.Unix()+math.MinInt64))
			buf = binary.BigEndian.AppendUint32(buf, uint32(tm.Nanosecond()))
		default:
			return nil, nil, fmt.Errorf("internal error: bad type %v for index", frv.Type()) // todo: should be caught when making index type
		}
		ik = append(ik, buf...)
	}
	// The prefix (without pk) and full key share the same backing array.
	n := len(ik)
	ik = append(ik, pk...)
	prek = ik[:n]
	return prek, ik, nil
}
|
218
vendor/github.com/mjl-/bstore/nonzero.go
generated
vendored
Normal file
218
vendor/github.com/mjl-/bstore/nonzero.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// isZero returns whether v is the zero value for the fields that we store.
|
||||
// reflect.IsZero cannot be used on structs because it checks private fields as well.
|
||||
func (ft fieldType) isZero(v reflect.Value) bool {
|
||||
if !v.IsValid() {
|
||||
return true
|
||||
}
|
||||
if ft.Ptr {
|
||||
return v.IsNil()
|
||||
}
|
||||
switch ft.Kind {
|
||||
case kindStruct:
|
||||
for _, f := range ft.Fields {
|
||||
if !f.Type.isZero(v.FieldByIndex(f.structField.Index)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
// Use standard IsZero otherwise, also for kindBinaryMarshal.
|
||||
return v.IsZero()
|
||||
}
|
||||
|
||||
// checkNonzero compare ofields and nfields (from previous type schema vs newly
|
||||
// created type schema) for nonzero struct tag. If an existing field got a
|
||||
// nonzero struct tag added, we verify that there are indeed no nonzero values
|
||||
// in the database. If there are, we return ErrZero.
|
||||
func (tx *Tx) checkNonzero(st storeType, tv *typeVersion, ofields, nfields []field) error {
|
||||
// First we gather paths that we need to check, so we can later simply
|
||||
// execute those steps on all data we need to read.
|
||||
paths := &follows{}
|
||||
next:
|
||||
for _, f := range nfields {
|
||||
for _, of := range ofields {
|
||||
if f.Name == of.Name {
|
||||
err := f.checkNonzeroGather(&of, paths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue next
|
||||
}
|
||||
}
|
||||
if err := f.checkNonzeroGather(nil, paths); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(paths.paths) == 0 {
|
||||
// Common case, not reading all data.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Finally actually do the checks.
|
||||
// todo: if there are only top-level fields to check, and we have an index, we can use the index check this without reading all data.
|
||||
return tx.checkNonzeroPaths(st, tv, paths.paths)
|
||||
}
|
||||
|
||||
// follow is one step on a path towards a (possibly nested) field that needs a
// nonzero check: either descending into a map key/value, or into a struct field.
type follow struct {
	mapKey, mapValue bool  // If set, descend into the map's keys or values.
	field            field // Otherwise, the struct field to descend into.
}
|
||||
|
||||
// follows accumulates paths (lists of follow steps) to fields that gained a
// nonzero constraint, while walking the type schema.
type follows struct {
	current []follow   // Path to the field currently being walked.
	paths   [][]follow // Completed paths that must be checked against all records.
}
|
||||
|
||||
func (f *follows) push(ff follow) {
|
||||
f.current = append(f.current, ff)
|
||||
}
|
||||
|
||||
func (f *follows) pop() {
|
||||
f.current = f.current[:len(f.current)-1]
|
||||
}
|
||||
|
||||
func (f *follows) add() {
|
||||
f.paths = append(f.paths, append([]follow{}, f.current...))
|
||||
}
|
||||
|
||||
func (f field) checkNonzeroGather(of *field, paths *follows) error {
|
||||
paths.push(follow{field: f})
|
||||
defer paths.pop()
|
||||
if f.Nonzero && (of == nil || !of.Nonzero) {
|
||||
paths.add()
|
||||
}
|
||||
if of != nil {
|
||||
return f.Type.checkNonzeroGather(of.Type, paths)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkNonzeroGather walks the new field type ft and old field type oft in
// lockstep, pushing map/slice/struct steps onto paths so newly-nonzero nested
// fields are recorded with the full path needed to reach them in a record.
func (ft fieldType) checkNonzeroGather(oft fieldType, paths *follows) error {
	switch ft.Kind {
	case kindMap:
		// Check both the key and value types of the map.
		paths.push(follow{mapKey: true})
		if err := ft.MapKey.checkNonzeroGather(*oft.MapKey, paths); err != nil {
			return err
		}
		paths.pop()

		paths.push(follow{mapValue: true})
		if err := ft.MapValue.checkNonzeroGather(*oft.MapValue, paths); err != nil {
			return err
		}
		paths.pop()

	case kindSlice:
		// Slices add no path step of their own; the element type is checked
		// for each element during the actual record check.
		err := ft.List.checkNonzeroGather(*oft.List, paths)
		if err != nil {
			return err
		}
	case kindStruct:
		// Match new fields to old fields by name, like Tx.checkNonzero.
	next:
		for _, ff := range ft.Fields {
			for _, off := range oft.Fields {
				if ff.Name == off.Name {
					err := ff.checkNonzeroGather(&off, paths)
					if err != nil {
						return err
					}
					continue next
				}
			}
			// Field did not exist in the old schema.
			err := ff.checkNonzeroGather(nil, paths)
			if err != nil {
				return err
			}
		}

	}
	return nil
}
|
||||
|
||||
// checkNonzeroPaths reads through all records of a type, and checks that the
// fields indicated by paths are nonzero. If not, ErrZero is returned.
func (tx *Tx) checkNonzeroPaths(st storeType, tv *typeVersion, paths [][]follow) error {
	rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
	if err != nil {
		return err
	}
	return rb.ForEach(func(bk, bv []byte) error {
		tx.stats.Records.Cursor++

		rv, err := st.parseNew(bk, bv)
		if err != nil {
			return err
		}
		// todo optimization: instead of parsing the full record, use the fieldmap to see if the value is nonzero.
		for _, path := range paths {
			// First step is always a top-level struct field; follow it, then
			// let the field walk the remainder of the path.
			frv := rv.FieldByIndex(path[0].field.structField.Index)
			if err := path[0].field.checkNonzero(frv, path[1:]); err != nil {
				return err
			}
		}
		return nil
	})
}
|
||||
|
||||
func (f field) checkNonzero(rv reflect.Value, path []follow) error {
|
||||
if len(path) == 0 {
|
||||
if !f.Nonzero {
|
||||
return fmt.Errorf("internal error: checkNonzero: expected field to have Nonzero set")
|
||||
}
|
||||
if f.Type.isZero(rv) {
|
||||
return fmt.Errorf("%w: field %q", ErrZero, f.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return f.Type.checkNonzero(rv, path)
|
||||
}
|
||||
|
||||
// checkNonzero walks one step of path through the value rv (descending into
// map keys/values, slice elements or struct fields), returning ErrZero when a
// field that must be nonzero holds a zero value.
func (ft fieldType) checkNonzero(rv reflect.Value, path []follow) error {
	switch ft.Kind {
	case kindMap:
		follow := path[0]
		path = path[1:]
		key := follow.mapKey
		if !key && !follow.mapValue {
			return fmt.Errorf("internal error: following map, expected mapKey or mapValue, got %#v", follow)
		}

		// Check every key (or value) of the map.
		iter := rv.MapRange()
		for iter.Next() {
			var err error
			if key {
				err = ft.MapKey.checkNonzero(iter.Key(), path)
			} else {
				err = ft.MapValue.checkNonzero(iter.Value(), path)
			}
			if err != nil {
				return err
			}
		}
	case kindSlice:
		// Slices consume no path step; check each element.
		n := rv.Len()
		for i := 0; i < n; i++ {
			if err := ft.List.checkNonzero(rv.Index(i), path); err != nil {
				return err
			}
		}
	case kindStruct:
		follow := path[0]
		path = path[1:]
		frv := rv.FieldByIndex(follow.field.structField.Index)
		if err := follow.field.checkNonzero(frv, path); err != nil {
			return err
		}
	default:
		// A non-empty path can only traverse composite kinds.
		return fmt.Errorf("internal error: checkNonzero with non-empty path, but kind %v", ft.Kind)
	}
	return nil
}
|
276
vendor/github.com/mjl-/bstore/pack.go
generated
vendored
Normal file
276
vendor/github.com/mjl-/bstore/pack.go
generated
vendored
Normal file
@ -0,0 +1,276 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// fieldmap represents a bitmap indicating which fields are actually stored and
// can be parsed. zero values for fields are not otherwise stored.
type fieldmap struct {
	max    int    // Required number of fields.
	buf    []byte // Bitmap, we write the next 0/1 at bit n.
	n      int    // Fields seen so far.
	offset int    // In final output, we write buf back after finish. Only relevant for packing.
	// Errorf reports internal errors; during pack/parse it panics with a
	// packErr/parseErr that is converted to a regular error by the caller.
	Errorf func(format string, args ...any)
}
|
||||
|
||||
// add bit to fieldmap indicating if the field is nonzero.
|
||||
func (f *fieldmap) Field(nonzero bool) {
|
||||
o := f.n / 8
|
||||
if f.n >= f.max {
|
||||
f.Errorf("internal error: too many fields, max %d", f.max)
|
||||
}
|
||||
if nonzero {
|
||||
f.buf[o] |= 1 << (7 - f.n%8)
|
||||
}
|
||||
f.n++
|
||||
}
|
||||
|
||||
// check if field i is nonzero.
|
||||
func (f *fieldmap) Nonzero(i int) bool {
|
||||
v := f.buf[i/8]&(1<<(7-i%8)) != 0
|
||||
return v
|
||||
}
|
||||
|
||||
// packer serializes a record to bytes. Fieldmaps are reserved in the output as
// zeroed placeholders and written back with their final bits during Finish.
type packer struct {
	b         *bytes.Buffer
	offset    int         // Bytes written so far; used as fieldmap write-back position.
	fieldmaps []*fieldmap // Pending fieldmaps, not excluding fieldmap below.
	fieldmap  *fieldmap   // Currently active.
	popped    []*fieldmap // Completed fieldmaps, to be written back during finish.
}
|
||||
|
||||
func (p *packer) Errorf(format string, args ...any) {
|
||||
panic(packErr{fmt.Errorf(format, args...)})
|
||||
}
|
||||
|
||||
// Push a new fieldmap on the stack for n fields.
|
||||
func (p *packer) PushFieldmap(n int) {
|
||||
p.fieldmaps = append(p.fieldmaps, p.fieldmap)
|
||||
buf := make([]byte, (n+7)/8)
|
||||
p.fieldmap = &fieldmap{max: n, buf: buf, offset: p.offset, Errorf: p.Errorf}
|
||||
p.Write(buf) // Updates offset. Write errors cause panic.
|
||||
}
|
||||
|
||||
// Pop a fieldmap from the stack. It is remembered in popped for writing the
|
||||
// bytes during finish.
|
||||
func (p *packer) PopFieldmap() {
|
||||
if p.fieldmap.n != p.fieldmap.max {
|
||||
p.Errorf("internal error: fieldmap n %d != max %d", p.fieldmap.n, p.fieldmap.max)
|
||||
}
|
||||
p.popped = append(p.popped, p.fieldmap)
|
||||
p.fieldmap = p.fieldmaps[len(p.fieldmaps)-1]
|
||||
p.fieldmaps = p.fieldmaps[:len(p.fieldmaps)-1]
|
||||
}
|
||||
|
||||
// Finish writes back finished (popped) fieldmaps to the correct offset,
|
||||
// returning the final bytes representation of this record.
|
||||
func (p *packer) Finish() []byte {
|
||||
if p.fieldmap != nil {
|
||||
p.Errorf("internal error: leftover fieldmap during finish")
|
||||
}
|
||||
buf := p.b.Bytes()
|
||||
for _, f := range p.popped {
|
||||
copy(buf[f.offset:], f.buf)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
// Field adds field with nonzeroness to the current fieldmap.
func (p *packer) Field(nonzero bool) {
	p.fieldmap.Field(nonzero)
}
|
||||
|
||||
func (p *packer) Write(buf []byte) (int, error) {
|
||||
n, err := p.b.Write(buf)
|
||||
if err != nil {
|
||||
p.Errorf("write: %w", err)
|
||||
}
|
||||
if n > 0 {
|
||||
p.offset += n
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *packer) AddBytes(buf []byte) {
|
||||
p.Uvarint(uint64(len(buf)))
|
||||
p.Write(buf) // Write errors cause panic.
|
||||
}
|
||||
|
||||
func (p *packer) Uvarint(v uint64) {
|
||||
buf := make([]byte, binary.MaxVarintLen64)
|
||||
o := binary.PutUvarint(buf, v)
|
||||
p.Write(buf[:o]) // Write errors cause panic.
|
||||
}
|
||||
|
||||
func (p *packer) Varint(v int64) {
|
||||
buf := make([]byte, binary.MaxVarintLen64)
|
||||
o := binary.PutVarint(buf, v)
|
||||
p.Write(buf[:o]) // Write errors cause panic.
|
||||
}
|
||||
|
||||
// packErr wraps an error for transport through panic/recover during packing,
// distinguishing pack aborts from genuine runtime panics.
type packErr struct {
	err error
}
|
||||
|
||||
// pack rv (reflect.Struct), excluding the primary key field.
// Internal pack errors are raised as packErr panics and converted back into a
// regular returned error here; any other panic is re-raised.
func (st storeType) pack(rv reflect.Value) (rbuf []byte, rerr error) {
	p := &packer{b: &bytes.Buffer{}}
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		perr, ok := x.(packErr)
		if ok {
			rerr = perr.err
			return
		}
		panic(x)
	}()
	st.Current.pack(p, rv)
	return p.Finish(), nil
}
|
||||
|
||||
// pack writes the record value rv: the schema version, a fieldmap for all
// non-PK fields, then the data of each nonzero field in order.
func (tv typeVersion) pack(p *packer, rv reflect.Value) {
	// When parsing, the same typeVersion (type schema) is used to
	// interpret the bytes correctly.
	p.Uvarint(uint64(tv.Version))

	// First field is the primary key, stored as the key, not in the value.
	p.PushFieldmap(len(tv.Fields) - 1)

	for _, f := range tv.Fields[1:] {
		nrv := rv.FieldByIndex(f.structField.Index)
		if f.Type.isZero(nrv) {
			if f.Nonzero {
				p.Errorf("%w: %q", ErrZero, f.Name)
			}
			p.Field(false)
			// Pretend to pack to get the nonzero checks.
			if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
				f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
			}
		} else {
			p.Field(true)
			f.Type.pack(p, nrv)
		}
	}
	p.PopFieldmap()
}
|
||||
|
||||
// pack the nonzero value rv.
// Scalars are varint/uvarint-encoded, byte-ish values are length-prefixed, and
// composite values (slice/map/struct) get their own nested fieldmap so zero
// elements/fields take no data bytes.
func (ft fieldType) pack(p *packer, rv reflect.Value) {
	if ft.Ptr {
		rv = rv.Elem()
	}
	switch ft.Kind {
	case kindBytes:
		p.AddBytes(rv.Bytes())
	case kindBinaryMarshal:
		v := rv
		buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
		if err != nil {
			p.Errorf("marshalbinary: %w", err)
		}
		p.AddBytes(buf)
	case kindBool:
		// No value needed. If false, it would be zero, handled above,
		// with a 0 in the fieldmap.
	case kindInt:
		// Limit to 32 bits so records stay portable across architectures.
		v := rv.Int()
		if v < math.MinInt32 || v > math.MaxInt32 {
			p.Errorf("%w: int %d does not fit in int32", ErrParam, v)
		}
		p.Varint(v)
	case kindInt8, kindInt16, kindInt32, kindInt64:
		p.Varint(rv.Int())
	case kindUint8, kindUint16, kindUint32, kindUint64:
		p.Uvarint(rv.Uint())
	case kindUint:
		v := rv.Uint()
		if v > math.MaxUint32 {
			p.Errorf("%w: uint %d does not fit in uint32", ErrParam, v)
		}
		p.Uvarint(v)
	case kindFloat32:
		p.Uvarint(uint64(math.Float32bits(rv.Interface().(float32))))
	case kindFloat64:
		p.Uvarint(uint64(math.Float64bits(rv.Interface().(float64))))
	case kindString:
		p.AddBytes([]byte(rv.String()))
	case kindTime:
		buf, err := rv.Interface().(time.Time).MarshalBinary()
		if err != nil {
			p.Errorf("%w: pack time: %s", ErrParam, err)
		}
		p.AddBytes(buf)
	case kindSlice:
		n := rv.Len()
		p.Uvarint(uint64(n))
		p.PushFieldmap(n)
		for i := 0; i < n; i++ {
			nrv := rv.Index(i)
			if ft.List.isZero(nrv) {
				p.Field(false)
				// Pretend to pack to get the nonzero checks of the element.
				if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
					ft.List.pack(&packer{b: &bytes.Buffer{}}, nrv)
				}
			} else {
				p.Field(true)
				ft.List.pack(p, nrv)
			}
		}
		p.PopFieldmap()
	case kindMap:
		// We write a fieldmap for zeroness of the values. The keys are unique, so there
		// can only be max 1 zero key. But there can be many zero values. struct{} is
		// common in Go, good to support that efficiently.
		n := rv.Len()
		p.Uvarint(uint64(n))
		p.PushFieldmap(n)
		iter := rv.MapRange()
		for iter.Next() {
			ft.MapKey.pack(p, iter.Key())
			v := iter.Value()
			if ft.MapValue.isZero(v) {
				p.Field(false)
				// Pretend to pack to get the nonzero checks of the value type.
				if v.IsValid() && (v.Kind() != reflect.Ptr || !v.IsNil()) {
					ft.MapValue.pack(&packer{b: &bytes.Buffer{}}, v)
				}
			} else {
				p.Field(true)
				ft.MapValue.pack(p, v)
			}
		}
		p.PopFieldmap()
	case kindStruct:
		p.PushFieldmap(len(ft.Fields))
		for _, f := range ft.Fields {
			nrv := rv.FieldByIndex(f.structField.Index)
			if f.Type.isZero(nrv) {
				if f.Nonzero {
					p.Errorf("%w: %q", ErrZero, f.Name)
				}
				p.Field(false)
				// Pretend to pack to get the nonzero checks.
				if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
					f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
				}
			} else {
				p.Field(true)
				f.Type.pack(p, nrv)
			}
		}
		p.PopFieldmap()
	default:
		p.Errorf("internal error: unhandled field type") // should be prevented when registering type
	}
}
|
321
vendor/github.com/mjl-/bstore/parse.go
generated
vendored
Normal file
321
vendor/github.com/mjl-/bstore/parse.go
generated
vendored
Normal file
@ -0,0 +1,321 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// parser reads a packed record. buf is consumed front-to-back; orig is kept
// only to include in error messages.
type parser struct {
	buf  []byte // Remaining unparsed bytes.
	orig []byte // Full original input, for error context.
}
|
||||
|
||||
func (p *parser) Errorf(format string, args ...any) {
|
||||
panic(parseErr{fmt.Errorf(format, args...)})
|
||||
}
|
||||
|
||||
// checkInt converts a parsed uvarint to int, aborting with ErrStore when it
// does not fit in an int32 (the portable limit used for stored counts).
func (p *parser) checkInt(un uint64) int {
	if un > math.MaxInt32 {
		p.Errorf("%w: uvarint %d does not fit in int32", ErrStore, un)
	}
	return int(un)
}
|
||||
|
||||
// Fieldmap starts a new fieldmap for n fields.
|
||||
func (p *parser) Fieldmap(n int) *fieldmap {
|
||||
// log.Printf("parse fieldmap %d bits", n)
|
||||
nb := (n + 7) / 8
|
||||
buf := p.Take(nb)
|
||||
return &fieldmap{n, buf, 0, 0, p.Errorf}
|
||||
}
|
||||
|
||||
// Take reads nb bytes.
|
||||
func (p *parser) Take(nb int) []byte {
|
||||
// log.Printf("take %d", nb)
|
||||
if len(p.buf) < nb {
|
||||
p.Errorf("%w: not enough bytes", ErrStore)
|
||||
}
|
||||
buf := p.buf[:nb]
|
||||
p.buf = p.buf[nb:]
|
||||
return buf
|
||||
}
|
||||
|
||||
// TakeBytes reads a uvarint representing the size of the bytes, followed by
|
||||
// that number of bytes.
|
||||
// dup is needed if you need to hold on to the bytes. Values from BoltDB are
|
||||
// only valid in the transaction, and not meant to be modified and are
|
||||
// memory-mapped read-only.
|
||||
func (p *parser) TakeBytes(dup bool) []byte {
|
||||
un := p.Uvarint()
|
||||
n := p.checkInt(un)
|
||||
buf := p.Take(n)
|
||||
if dup {
|
||||
// todo: check for a max size, beyond which we refuse to allocate?
|
||||
nbuf := make([]byte, len(buf))
|
||||
copy(nbuf, buf)
|
||||
buf = nbuf
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func (p *parser) Uvarint() uint64 {
|
||||
v, n := binary.Uvarint(p.buf)
|
||||
if n == 0 {
|
||||
p.Errorf("%w: uvarint: not enough bytes", ErrStore)
|
||||
}
|
||||
if n < 0 {
|
||||
p.Errorf("%w: uvarint overflow", ErrStore)
|
||||
}
|
||||
// log.Printf("take uvarint, %d bytes", n)
|
||||
p.buf = p.buf[n:]
|
||||
return v
|
||||
}
|
||||
|
||||
func (p *parser) Varint() int64 {
|
||||
v, n := binary.Varint(p.buf)
|
||||
if n == 0 {
|
||||
p.Errorf("%w: varint: not enough bytes", ErrStore)
|
||||
}
|
||||
if n < 0 {
|
||||
p.Errorf("%w: varint overflow", ErrStore)
|
||||
}
|
||||
// log.Printf("take varint, %d bytes", n)
|
||||
p.buf = p.buf[n:]
|
||||
return v
|
||||
}
|
||||
|
||||
// parseErr wraps an error for transport through panic/recover during parsing,
// distinguishing parse aborts from genuine runtime panics.
type parseErr struct {
	err error
}
|
||||
|
||||
// parse rv (reflect.Struct) from buf.
// Does not parse the primary key field; that is stored as the bucket key, see
// parseFull. Internal parse errors surface as parseErr panics and are
// converted to a returned error with context here.
func (st storeType) parse(rv reflect.Value, buf []byte) (rerr error) {
	p := &parser{buf: buf, orig: buf}
	var version uint32
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		perr, ok := x.(parseErr)
		if ok {
			rerr = fmt.Errorf("%w (version %d, buf %x, orig %x)", perr.err, version, p.buf, p.orig)
			return
		}
		panic(x)
	}()

	version = uint32(p.Uvarint())
	tv, ok := st.Versions[version]
	if !ok {
		return fmt.Errorf("%w: unknown type version %d", ErrStore, version)
	}

	tv.parse(p, rv)

	// All stored bytes must be consumed; leftovers indicate corruption.
	if len(p.buf) != 0 {
		return fmt.Errorf("%w: leftover data after parsing", ErrStore)
	}

	return nil
}
|
||||
|
||||
// parseNew parses bk and bv into a newly created value of type st.Type.
|
||||
func (st storeType) parseNew(bk, bv []byte) (reflect.Value, error) {
|
||||
rv := reflect.New(st.Type).Elem()
|
||||
if err := st.parseFull(rv, bk, bv); err != nil {
|
||||
return reflect.Value{}, err
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
// parseFull parses a full record from bk and bv into value rv, which must be
|
||||
// of type st.Type.
|
||||
func (st storeType) parseFull(rv reflect.Value, bk, bv []byte) error {
|
||||
if err := parsePK(rv.Field(0), bk); err != nil {
|
||||
return err
|
||||
}
|
||||
err := st.parse(rv, bv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parse reads all non-PK fields of a record according to this type version's
// fieldmap: nonzero fields are parsed, removed fields are skipped, zero fields
// are reset to their zero value.
func (tv typeVersion) parse(p *parser, rv reflect.Value) {
	// First field is the primary key, stored as boltdb key only, not in
	// the value.
	fm := p.Fieldmap(len(tv.Fields) - 1)
	for i, f := range tv.Fields[1:] {
		if f.structField.Type == nil {
			// Do not parse this field in the current Go type, but
			// we must still skip over the bytes.
			if fm.Nonzero(i) {
				f.Type.skip(p)
			}
			continue
		}
		if fm.Nonzero(i) {
			f.Type.parse(p, rv.FieldByIndex(f.structField.Index))
		} else if f.Nonzero {
			// Consistency check. Should not happen, we enforce nonzeroness.
			p.Errorf("%w: unexpected nonzero value for %q", ErrStore, f.Name)
		} else {
			// Zero fields store no bytes; explicitly zero the destination.
			rv.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
		}
	}
}
|
||||
|
||||
// parse a nonzero fieldType.
|
||||
func (ft fieldType) parse(p *parser, rv reflect.Value) {
|
||||
// Because we allow schema changes from ptr to nonptr, rv can be a pointer or direct value regardless of ft.Ptr.
|
||||
if rv.Kind() == reflect.Ptr {
|
||||
nrv := reflect.New(rv.Type().Elem())
|
||||
rv.Set(nrv)
|
||||
rv = nrv.Elem()
|
||||
}
|
||||
switch ft.Kind {
|
||||
case kindBytes:
|
||||
rv.SetBytes(p.TakeBytes(true))
|
||||
case kindBinaryMarshal:
|
||||
buf := p.TakeBytes(false)
|
||||
t := rv.Type()
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
v := reflect.New(t)
|
||||
err := v.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(buf)
|
||||
if err != nil {
|
||||
panic(parseErr{err})
|
||||
}
|
||||
if rv.Type().Kind() == reflect.Ptr {
|
||||
rv.Set(v)
|
||||
} else {
|
||||
rv.Set(v.Elem())
|
||||
}
|
||||
case kindBool:
|
||||
rv.SetBool(true)
|
||||
case kindInt:
|
||||
v := p.Varint()
|
||||
if v < math.MinInt32 || v > math.MaxInt32 {
|
||||
p.Errorf("%w: int %d does not fit in int32", ErrStore, v)
|
||||
}
|
||||
rv.SetInt(v)
|
||||
case kindInt8, kindInt16, kindInt32, kindInt64:
|
||||
rv.SetInt(p.Varint())
|
||||
case kindUint:
|
||||
v := p.Uvarint()
|
||||
if v > math.MaxUint32 {
|
||||
p.Errorf("%w: uint %d does not fit in uint32", ErrStore, v)
|
||||
}
|
||||
rv.SetUint(v)
|
||||
case kindUint8, kindUint16, kindUint32, kindUint64:
|
||||
rv.SetUint(p.Uvarint())
|
||||
case kindFloat32:
|
||||
rv.SetFloat(float64(math.Float32frombits(uint32(p.Uvarint()))))
|
||||
case kindFloat64:
|
||||
rv.SetFloat(math.Float64frombits(p.Uvarint()))
|
||||
case kindString:
|
||||
rv.SetString(string(p.TakeBytes(false)))
|
||||
case kindTime:
|
||||
err := rv.Addr().Interface().(*time.Time).UnmarshalBinary(p.TakeBytes(false))
|
||||
if err != nil {
|
||||
p.Errorf("%w: parsing time: %s", ErrStore, err)
|
||||
}
|
||||
case kindSlice:
|
||||
un := p.Uvarint()
|
||||
n := p.checkInt(un)
|
||||
fm := p.Fieldmap(n)
|
||||
slc := reflect.MakeSlice(rv.Type(), n, n)
|
||||
for i := 0; i < int(n); i++ {
|
||||
if fm.Nonzero(i) {
|
||||
ft.List.parse(p, slc.Index(i))
|
||||
}
|
||||
}
|
||||
rv.Set(slc)
|
||||
case kindMap:
|
||||
un := p.Uvarint()
|
||||
n := p.checkInt(un)
|
||||
fm := p.Fieldmap(n)
|
||||
mp := reflect.MakeMapWithSize(rv.Type(), n)
|
||||
for i := 0; i < n; i++ {
|
||||
mk := reflect.New(rv.Type().Key()).Elem()
|
||||
ft.MapKey.parse(p, mk)
|
||||
mv := reflect.New(rv.Type().Elem()).Elem()
|
||||
if fm.Nonzero(i) {
|
||||
ft.MapValue.parse(p, mv)
|
||||
}
|
||||
mp.SetMapIndex(mk, mv)
|
||||
}
|
||||
rv.Set(mp)
|
||||
case kindStruct:
|
||||
fm := p.Fieldmap(len(ft.Fields))
|
||||
strct := reflect.New(rv.Type()).Elem()
|
||||
for i, f := range ft.Fields {
|
||||
if f.structField.Type == nil {
|
||||
f.Type.skip(p)
|
||||
continue
|
||||
}
|
||||
if fm.Nonzero(i) {
|
||||
f.Type.parse(p, strct.FieldByIndex(f.structField.Index))
|
||||
} else if f.Nonzero {
|
||||
// Consistency check, we enforce that nonzero is not stored if not allowed.
|
||||
p.Errorf("%w: %q", ErrZero, f.Name)
|
||||
} else {
|
||||
strct.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
|
||||
}
|
||||
}
|
||||
rv.Set(strct)
|
||||
default:
|
||||
p.Errorf("internal error: unhandled field type") // should be prevented when registering type
|
||||
}
|
||||
}
|
||||
|
||||
// skip over the bytes for this fieldType. Needed when an older typeVersion has
// a field that the current reflect.Type does not (can) have.
// Must mirror fieldType.parse exactly so the stream stays aligned.
func (ft fieldType) skip(p *parser) {
	switch ft.Kind {
	case kindBytes, kindBinaryMarshal, kindString:
		p.TakeBytes(false)
	case kindBool:
		// Bools carry no data; their value lives in the fieldmap bit.
	case kindInt8, kindInt16, kindInt32, kindInt, kindInt64:
		p.Varint()
	case kindUint8, kindUint16, kindUint32, kindUint, kindUint64, kindFloat32, kindFloat64:
		p.Uvarint()
	case kindTime:
		p.TakeBytes(false)
	case kindSlice:
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		for i := 0; i < n; i++ {
			if fm.Nonzero(i) {
				ft.List.skip(p)
			}
		}
	case kindMap:
		un := p.Uvarint()
		n := p.checkInt(un)
		fm := p.Fieldmap(n)
		for i := 0; i < n; i++ {
			// Keys are always stored; values only when nonzero.
			ft.MapKey.skip(p)
			if fm.Nonzero(i) {
				ft.MapValue.skip(p)
			}
		}
	case kindStruct:
		fm := p.Fieldmap(len(ft.Fields))
		for i, f := range ft.Fields {
			if fm.Nonzero(i) {
				f.Type.skip(p)
			}
		}
	default:
		p.Errorf("internal error: unhandled field type") // should be prevented when registering type
	}
}
|
341
vendor/github.com/mjl-/bstore/plan.go
generated
vendored
Normal file
341
vendor/github.com/mjl-/bstore/plan.go
generated
vendored
Normal file
@ -0,0 +1,341 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// plan represents a plan to execute a query, possibly using a simple/quick
// bucket "get" or cursor scan (forward/backward) on either the records or an
// index.
type plan[T any] struct {
	// The index for this plan. If nil, we are using pk's, in which case
	// "keys" below can be nil for a range scan with start/stop (possibly empty
	// for full scan), or non-nil for looking up specific keys.
	idx *index

	// Use full unique index to get specific values from keys. idx above can be
	// a unique index that we only use partially. In that case, this field is
	// false.
	unique bool

	// If not nil, used to fetch explicit keys when using pk or unique
	// index. Required non-nil for unique.
	keys [][]byte

	desc           bool   // Direction of the range scan.
	start          []byte // First key to scan. Filters below may still apply. If desc, this value is > than stop (if it is set). If nil, we begin ranging at the first or last (for desc) key.
	stop           []byte // Last key to scan. Can be nil independently of start.
	startInclusive bool   // If the start and stop values are inclusive or exclusive.
	stopInclusive  bool

	// Filter we need to apply on after retrieving the record. If all
	// original filters from a query were handled by "keys" above, or by a
	// range scan, this field is empty.
	filters []filter[T]

	// Orders we need to apply after first retrieving all records. As with
	// filters, if a range scan takes care of an ordering from the query,
	// this field is empty.
	orders []order
}
|
||||
|
||||
// selectPlan selects the best plan for this query.
// Strategy, in order of preference: explicit PK list; fully-matched unique
// index; then the best PK/non-unique-index range scan judged by number of
// equality-prefix fields, range bounds and usable ordering; else a full scan.
func (q *Query[T]) selectPlan() (*plan[T], error) {
	// Simple case first: List of known IDs. We can just fetch them from
	// the records bucket by their primary keys. This is common for a
	// "Get" query.
	if q.xfilterIDs != nil {
		orders := q.xorders
		keys := q.xfilterIDs.pks
		// If there is an ordering on the PK field, we do the ordering here.
		if len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name {
			asc := orders[0].asc
			sort.Slice(keys, func(i, j int) bool {
				cmp := bytes.Compare(keys[i], keys[j])
				return asc && cmp < 0 || !asc && cmp > 0
			})
			orders = orders[1:]
		}
		p := &plan[T]{
			keys:    keys,
			filters: q.xfilters,
			orders:  orders,
		}
		return p, nil
	}

	// Try using a fully matched unique index. We build a map with all
	// fields that have an equal or in filter. So we can easily look
	// through our unique indices and get a match. We only look at a single
	// filter per field. If there are multiple, we would use the last one.
	// That's okay, we'll filter records out when we execute the leftover
	// filters. Probably not common.
	// This is common for filterEqual and filterIn on
	// fields that have a unique index.
	equalsIn := map[string]*filter[T]{}
	for i := range q.xfilters {
		ff := &q.xfilters[i]
		switch f := (*ff).(type) {
		case filterEqual[T]:
			equalsIn[f.field.Name] = ff
		case filterIn[T]:
			equalsIn[f.field.Name] = ff
		}
	}
indices:
	for _, idx := range q.st.Current.Indices {
		// Direct fetches only for unique indices.
		if !idx.Unique {
			continue
		}
		for _, f := range idx.Fields {
			if _, ok := equalsIn[f.Name]; !ok {
				// At least one index field does not have a filter.
				continue indices
			}
		}
		// Calculate all keys that we need to retrieve from the index.
		// todo optimization: if there is a sort involving these fields, we could do the sorting before fetching data.
		// todo optimization: we can generate the keys on demand, will help when limit is in use: we are not generating all keys.
		var keys [][]byte
		var skipFilters []*filter[T] // Filters to remove from the full list because they are handled by quering the index.
		for i, f := range idx.Fields {
			var rvalues []reflect.Value
			ff := equalsIn[f.Name]
			skipFilters = append(skipFilters, ff)
			switch fi := (*ff).(type) {
			case filterEqual[T]:
				rvalues = []reflect.Value{fi.rvalue}
			case filterIn[T]:
				rvalues = fi.rvalues
			default:
				return nil, fmt.Errorf("internal error: bad filter %T", equalsIn[f.Name])
			}
			fekeys := make([][]byte, len(rvalues))
			for j, fv := range rvalues {
				key, _, err := packIndexKeys([]reflect.Value{fv}, nil)
				if err != nil {
					q.error(err)
					return nil, err
				}
				fekeys[j] = key
			}
			if i == 0 {
				keys = fekeys
				continue
			}
			// Multiply current keys with the new values (cartesian product
			// of all filterIn values over the index fields).
			nkeys := make([][]byte, 0, len(keys)*len(fekeys))
			for _, k := range keys {
				for _, fk := range fekeys {
					nk := append(append([]byte{}, k...), fk...)
					nkeys = append(nkeys, nk)
				}
			}
			keys = nkeys
		}
		p := &plan[T]{
			idx:     idx,
			unique:  true,
			keys:    keys,
			filters: dropFilters(q.xfilters, skipFilters),
			orders:  q.xorders,
		}
		return p, nil
	}

	// Try all other indices. We treat them all as non-unique indices now.
	// We want to use the one with as many "equal" prefix fields as
	// possible. Then we hope to use a scan on the remaining, either
	// because of a filterCompare, or for an ordering. If there is a limit,
	// orderings are preferred over compares.
	equals := map[string]*filter[T]{}
	for i := range q.xfilters {
		ff := &q.xfilters[i]
		switch f := (*ff).(type) {
		case filterEqual[T]:
			equals[f.field.Name] = ff
		}
	}

	// We are going to generate new plans, and keep the new one if it is better than what we have.
	var p *plan[T]
	var nequals int
	var nrange int
	var ordered bool

	evaluatePKOrIndex := func(idx *index) error {
		var isPK bool
		var packKeys func([]reflect.Value) ([]byte, error)
		if idx == nil {
			// Make pretend index for the primary key.
			isPK = true
			idx = &index{
				Fields: []field{q.st.Current.Fields[0]},
			}
			packKeys = func(l []reflect.Value) ([]byte, error) {
				return packPK(l[0])
			}
		} else {
			packKeys = func(l []reflect.Value) ([]byte, error) {
				key, _, err := packIndexKeys(l, nil)
				return key, err
			}
		}

		// Count how many leading index fields have an equality filter.
		var neq = 0
		// log.Printf("idx %v", idx)
		var skipFilters []*filter[T]
		for _, f := range idx.Fields {
			if ff, ok := equals[f.Name]; ok {
				skipFilters = append(skipFilters, ff)
				neq++
			} else {
				break
			}
		}

		// See if the next field can be used for compare.
		var gx, lx *filterCompare[T]
		var nrng int
		var order *order
		orders := q.xorders
		if neq < len(idx.Fields) {
			nf := idx.Fields[neq]
			for i := range q.xfilters {
				ff := &q.xfilters[i]
				switch f := (*ff).(type) {
				case filterCompare[T]:
					if f.field.Name != nf.Name {
						continue
					}
					switch f.op {
					case opGreater, opGreaterEqual:
						if gx == nil {
							gx = &f
							skipFilters = append(skipFilters, ff)
							nrng++
						}
					case opLess, opLessEqual:
						if lx == nil {
							lx = &f
							skipFilters = append(skipFilters, ff)
							nrng++
						}
					}
				}
			}

			// See if it can be used for ordering.
			// todo optimization: we could use multiple orders
			if len(orders) > 0 && orders[0].field.Name == nf.Name {
				order = &orders[0]
				orders = orders[1:]
			}
		}

		// See if this is better than what we had: more equality fields wins;
		// ties broken by more range bounds, or by a usable ordering.
		if !(neq > nequals || (neq == nequals && (nrng > nrange || order != nil && !ordered && (q.xlimit > 0 || nrng == nrange)))) {
			// log.Printf("plan not better, neq %d, nrng %d, limit %d, order %v ordered %v", neq, nrng, q.limit, order, ordered)
			return nil
		}
		nequals = neq
		nrange = nrng
		ordered = order != nil

		// Calculate the prefix key.
		var kvalues []reflect.Value
		for i := 0; i < neq; i++ {
			f := idx.Fields[i]
			kvalues = append(kvalues, (*equals[f.Name]).(filterEqual[T]).rvalue)
		}
		var key []byte
		var err error
		if neq > 0 {
			key, err = packKeys(kvalues)
			if err != nil {
				return err
			}
		}

		// Extend the common prefix with the compare bounds, if any.
		start := key
		stop := key
		if gx != nil {
			k, err := packKeys([]reflect.Value{gx.value})
			if err != nil {
				return err
			}
			start = append(append([]byte{}, start...), k...)
		}
		if lx != nil {
			k, err := packKeys([]reflect.Value{lx.value})
			if err != nil {
				return err
			}
			stop = append(append([]byte{}, stop...), k...)
		}

		startInclusive := gx == nil || gx.op != opGreater
		stopInclusive := lx == nil || lx.op != opLess
		if order != nil && !order.asc {
			// Descending scan: swap the bounds and their inclusiveness.
			start, stop = stop, start
			startInclusive, stopInclusive = stopInclusive, startInclusive
		}

		if isPK {
			idx = nil // Clear our fake index for PK.
		}

		p = &plan[T]{
			idx:            idx,
			desc:           order != nil && !order.asc,
			start:          start,
			stop:           stop,
			startInclusive: startInclusive,
			stopInclusive:  stopInclusive,
			filters:        dropFilters(q.xfilters, skipFilters),
			orders:         orders,
		}
		return nil
	}

	if err := evaluatePKOrIndex(nil); err != nil {
		q.error(err)
		return nil, q.err
	}
	for _, idx := range q.st.Current.Indices {
		if err := evaluatePKOrIndex(idx); err != nil {
			q.error(err)
			return nil, q.err
		}

	}
	if p != nil {
		return p, nil
	}

	// We'll just do a scan over all data.
	p = &plan[T]{
		filters: q.xfilters,
		orders:  q.xorders,
	}
	return p, nil
}
|
||||
|
||||
func dropFilters[T any](filters []T, skip []*T) []T {
|
||||
n := make([]T, 0, len(filters)-len(skip))
|
||||
next:
|
||||
for i := range filters {
|
||||
f := &filters[i]
|
||||
for _, s := range skip {
|
||||
if f == s {
|
||||
continue next
|
||||
}
|
||||
}
|
||||
n = append(n, *f)
|
||||
}
|
||||
return n
|
||||
}
|
1130
vendor/github.com/mjl-/bstore/query.go
generated
vendored
Normal file
1130
vendor/github.com/mjl-/bstore/query.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1215
vendor/github.com/mjl-/bstore/register.go
generated
vendored
Normal file
1215
vendor/github.com/mjl-/bstore/register.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
105
vendor/github.com/mjl-/bstore/stats.go
generated
vendored
Normal file
105
vendor/github.com/mjl-/bstore/stats.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
package bstore
|
||||
|
||||
// StatsKV represent operations on the underlying BoltDB key/value store.
type StatsKV struct {
	Get    uint
	Put    uint // For Stats.Bucket, this counts calls of CreateBucket.
	Delete uint
	Cursor uint // Any cursor operation: Seek/First/Last/Next/Prev.
}

// Stats tracks DB/Tx/Query statistics, mostly counters.
type Stats struct {
	// Number of read-only or writable transactions. Set for DB only.
	Reads  uint
	Writes uint

	Bucket  StatsKV // Use of buckets.
	Records StatsKV // Use of records bucket for types.
	Index   StatsKV // Use of index buckets for types.

	// Operations that modify the database. Each record is counted, e.g.
	// for a query that updates/deletes multiple records.
	Get    uint
	Insert uint
	Update uint
	Delete uint

	Queries       uint // Total queries executed.
	PlanTableScan uint // Full table scans.
	PlanPK        uint // Primary key get.
	PlanUnique    uint // Full key Unique index get.
	PlanPKScan    uint // Scan over primary keys.
	PlanIndexScan uint // Scan over index.
	Sort          uint // In-memory collect and sort.

	LastType    string // Last type queried.
	LastIndex   string // Last index for LastType used for a query, or empty.
	LastOrdered bool   // Whether last scan (PK or index) use was ordered, e.g. for sorting or because of a comparison filter.
	LastAsc     bool   // If ordered, whether last index scan was ascending.
}

// add accumulates the counters from n into skv.
func (skv *StatsKV) add(n StatsKV) {
	skv.Get += n.Get
	skv.Put += n.Put
	skv.Delete += n.Delete
	skv.Cursor += n.Cursor
}

// sub subtracts the counters in n from skv.
func (skv *StatsKV) sub(n StatsKV) {
	skv.Get -= n.Get
	skv.Put -= n.Put
	skv.Delete -= n.Delete
	skv.Cursor -= n.Cursor
}

// add accumulates the counters from n into st. The Last* fields are not
// counters: they are overwritten with the values from n.
func (st *Stats) add(n Stats) {
	st.Reads += n.Reads
	st.Writes += n.Writes

	st.Bucket.add(n.Bucket)
	st.Records.add(n.Records)
	st.Index.add(n.Index)

	st.Get += n.Get
	st.Insert += n.Insert
	st.Update += n.Update
	st.Delete += n.Delete

	st.Queries += n.Queries
	st.PlanTableScan += n.PlanTableScan
	st.PlanPK += n.PlanPK
	st.PlanUnique += n.PlanUnique
	st.PlanPKScan += n.PlanPKScan
	st.PlanIndexScan += n.PlanIndexScan
	st.Sort += n.Sort

	st.LastType = n.LastType
	st.LastIndex = n.LastIndex
	st.LastOrdered = n.LastOrdered
	st.LastAsc = n.LastAsc
}

// Sub returns st with the counters from o subtracted.
// The Last* fields are not counters and are kept as they are in st.
func (st Stats) Sub(o Stats) Stats {
	st.Reads -= o.Reads
	st.Writes -= o.Writes

	st.Bucket.sub(o.Bucket)
	st.Records.sub(o.Records)
	st.Index.sub(o.Index)

	st.Get -= o.Get
	st.Insert -= o.Insert
	st.Update -= o.Update
	st.Delete -= o.Delete

	st.Queries -= o.Queries
	st.PlanTableScan -= o.PlanTableScan
	st.PlanPK -= o.PlanPK
	st.PlanUnique -= o.PlanUnique
	st.PlanPKScan -= o.PlanPKScan
	st.PlanIndexScan -= o.PlanIndexScan
	st.Sort -= o.Sort

	return st
}
|
566
vendor/github.com/mjl-/bstore/store.go
generated
vendored
Normal file
566
vendor/github.com/mjl-/bstore/store.go
generated
vendored
Normal file
@ -0,0 +1,566 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// Errors returned by the bstore API. ErrAbsent may be compared with ==;
// errors.Is works for all of them since they are wrapped with %w.
var (
	ErrAbsent       = errors.New("absent") // If a function can return an ErrAbsent, it can be compared directly, without errors.Is.
	ErrZero         = errors.New("must be nonzero")
	ErrUnique       = errors.New("not unique")
	ErrReference    = errors.New("referential inconsistency")
	ErrMultiple     = errors.New("multiple results")
	ErrSeq          = errors.New("highest autoincrement sequence value reached")
	ErrType         = errors.New("unknown/bad type")
	ErrIncompatible = errors.New("incompatible types")
	ErrFinished     = errors.New("query finished")
	ErrStore        = errors.New("internal/storage error") // E.g. when buckets disappear, possibly by external users of the underlying BoltDB database.
	ErrParam        = errors.New("bad parameters")

	// Internal, unexported errors.
	errTxClosed    = errors.New("transaction is closed")
	errNestedIndex = errors.New("struct tags index/unique only allowed at top-level structs")
)

var sanityChecks bool // Only enabled during tests.
|
||||
|
||||
// DB is a database storing Go struct values in an underlying bolt database.
// DB is safe for concurrent use, unlike a Tx or a Query.
type DB struct {
	bdb *bolt.DB

	// Read transaction take an rlock on types. Register can make changes and
	// needs a wlock.
	typesMutex sync.RWMutex
	types      map[reflect.Type]storeType
	typeNames  map[string]storeType // Go type name to store type, for checking duplicates.

	// Lifetime statistics, protected by statsMutex; see Stats.
	statsMutex sync.Mutex
	stats      Stats
}

// Tx is a transaction on DB.
//
// A Tx is not safe for concurrent use.
type Tx struct {
	db  *DB // If nil, this transaction is closed.
	btx *bolt.Tx

	// Lazily initialized cache of bolt buckets used during this
	// transaction, to avoid repeated lookups; see Tx.bucket.
	bucketCache map[bucketKey]*bolt.Bucket

	stats Stats
}

// bucketKey represents a subbucket for a type.
type bucketKey struct {
	typeName string
	sub      string // Empty for top-level type bucket, otherwise "records", "types" or starting with "index.".
}

// index describes a (possibly unique) index over one or more fields of a type.
type index struct {
	Unique bool
	Name   string // Normally named after the field. But user can specify alternative name with "index" or "unique" struct tag with parameter.
	Fields []field

	tv *typeVersion // Type version this index belongs to.
}

// storeType is a registered type with all known versions of its schema.
type storeType struct {
	Name    string       // Name of type as stored in database. Different from the current Go type name if the type uses the "typename" struct tag.
	Type    reflect.Type // Type we parse into for new values.
	Current *typeVersion

	// Earlier schema versions. Older type versions can still be stored. We
	// prepare them for parsing into the reflect.Type. Some stored fields in
	// old versions may be ignored: when a later schema has removed the field,
	// that old stored field is considered deleted and will be ignored when
	// parsing.
	Versions map[uint32]*typeVersion
}

// note: when changing, possibly update func equal as well.
type typeVersion struct {
	Version       uint32              // First uvarint of a stored record references this version.
	OndiskVersion uint32              // Version of on-disk format. Currently always 1.
	Noauto        bool                // If true, the primary key is an int but opted out of autoincrement.
	Fields        []field             // Fields that we store. Embed/anonymous fields are kept separately in embedFields, and are not stored.
	Indices       map[string]*index   // By name of index.
	ReferencedBy  map[string]struct{} // Type names that reference this type. We require they are registered at the same time to maintain referential integrity.

	name         string
	referencedBy []*index            // Indexes (from other types) that reference this type.
	references   map[string]struct{} // Keys are the type names referenced. This is a summary for the references from Fields.
	embedFields  []embed             // Embed/anonymous fields, their values are stored through Fields, we keep them for setting values.

	fillPercent float64 // For "records" bucket. Set to 1 for append-only/mostly use as set with HintAppend, 0.5 otherwise.
}

// note: when changing, possibly update func equal as well.
// embed/anonymous fields are represented as type embed. The fields inside the embed type are of this type field.
type field struct {
	Name       string
	Type       fieldType
	Nonzero    bool
	References []string // Referenced fields. Only for the top-level struct fields, not for nested structs.
	Default    string   // As specified in struct tag. Processed version is defaultValue.

	// If not the zero reflect.Value, set this value instead of a zero value on insert.
	// This is always a non-pointer value. Only set for the current typeVersion
	// linked to a Go type.
	defaultValue reflect.Value

	// Only set if this typeVersion will parse this field. We check
	// structField.Type for non-nil before parsing this field. We don't parse it
	// if this field is no longer in the type, or if it has been removed and
	// added again in later schema versions.
	structField reflect.StructField

	indices map[string]*index // Indices (by name) that cover this field.
}

// embed is for embed/anonymous fields. the fields inside are represented as a type field.
type embed struct {
	Name        string
	Type        fieldType
	structField reflect.StructField
}
|
||||
|
||||
// kind is the basic type class of a (dereferenced) stored field type.
type kind int

const (
	kindInvalid kind = iota
	kindBytes
	kindBool
	kindInt
	kindInt8
	kindInt16
	kindInt32
	kindInt64
	kindUint
	kindUint8
	kindUint16
	kindUint32
	kindUint64
	kindFloat32
	kindFloat64
	kindMap
	kindSlice
	kindString
	kindTime
	kindBinaryMarshal
	kindStruct
)

// kindStrings must be kept in the same order as the kind constants above:
// kind.String indexes into this slice.
var kindStrings = []string{
	"(invalid)",
	"bytes",
	"bool",
	"int",
	"int8",
	"int16",
	"int32",
	"int64",
	"uint",
	"uint8",
	"uint16",
	"uint32",
	"uint64",
	"float32",
	"float64",
	"map",
	"slice",
	"string",
	"time",
	"binarymarshal",
	"struct",
}

// String returns the human-readable name of the kind.
func (k kind) String() string {
	return kindStrings[k]
}

// fieldType describes the type of a stored field.
type fieldType struct {
	Ptr              bool       // If type is a pointer.
	Kind             kind       // Type with possible Ptr dereferenced.
	Fields           []field    // For kindStruct.
	MapKey, MapValue *fieldType // For kindMap.
	List             *fieldType // For kindSlice.
}

// String returns the kind name, with "ptr" appended for pointer types,
// e.g. "intptr".
func (ft fieldType) String() string {
	s := ft.Kind.String()
	if ft.Ptr {
		return s + "ptr"
	}
	return s
}
|
||||
|
||||
// Options configure how a database should be opened or initialized.
// The zero value is ready to use; Open also accepts a nil *Options.
type Options struct {
	Timeout   time.Duration // Abort if opening DB takes longer than Timeout.
	Perm      fs.FileMode   // Permissions for new file if created. If zero, 0600 is used.
	MustExist bool          // Before opening, check that file exists. If not, io/fs.ErrNotExist is returned.
}
|
||||
|
||||
// Open opens a bstore database and registers types by calling Register.
|
||||
//
|
||||
// If the file does not exist, a new database file is created, unless opts has
|
||||
// MustExist set. Files are created with permission 0600, or with Perm from
|
||||
// Options if nonzero.
|
||||
//
|
||||
// Only one DB instance can be open for a file at a time. Use opts.Timeout to
|
||||
// specify a timeout during open to prevent indefinite blocking.
|
||||
func Open(path string, opts *Options, typeValues ...any) (*DB, error) {
|
||||
var bopts *bolt.Options
|
||||
if opts != nil && opts.Timeout > 0 {
|
||||
bopts = &bolt.Options{Timeout: opts.Timeout}
|
||||
}
|
||||
var mode fs.FileMode = 0600
|
||||
if opts != nil && opts.Perm != 0 {
|
||||
mode = opts.Perm
|
||||
}
|
||||
if opts != nil && opts.MustExist {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
bdb, err := bolt.Open(path, mode, bopts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
typeNames := map[string]storeType{}
|
||||
types := map[reflect.Type]storeType{}
|
||||
db := &DB{bdb: bdb, typeNames: typeNames, types: types}
|
||||
if err := db.Register(typeValues...); err != nil {
|
||||
bdb.Close()
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// Close closes the underlying database.
func (db *DB) Close() error {
	return db.bdb.Close()
}

// Stats returns usage statistics for the lifetime of DB. Stats are tracked
// first in a Query or a Tx. Stats from a Query are propagated to its Tx when
// the Query finishes. Stats from a Tx are propagated to its DB when the
// transaction ends.
func (db *DB) Stats() Stats {
	db.statsMutex.Lock()
	defer db.statsMutex.Unlock()
	return db.stats // Returned by value: a snapshot.
}

// Stats returns usage statistics for this transaction.
// When a transaction is rolled back or committed, its statistics are copied
// into its DB.
func (tx *Tx) Stats() Stats {
	return tx.stats
}

// WriteTo writes the entire database to w, not including changes made during this transaction.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	return tx.btx.WriteTo(w)
}
|
||||
|
||||
// bucket returns a bucket through the per-transaction cache. The top-level
// type bucket and the requested subbucket are fetched from bolt and cached
// on first use; ErrStore is returned when a bucket is missing.
func (tx *Tx) bucket(bk bucketKey) (*bolt.Bucket, error) {
	if tx.bucketCache == nil {
		tx.bucketCache = map[bucketKey]*bolt.Bucket{}
	}
	b := tx.bucketCache[bk]
	if b != nil {
		return b, nil
	}
	// Resolve (and cache) the top-level bucket for the type first.
	top := tx.bucketCache[bucketKey{bk.typeName, ""}]
	if top == nil {
		tx.stats.Bucket.Get++
		top = tx.btx.Bucket([]byte(bk.typeName))
		if top == nil {
			return nil, fmt.Errorf("%w: missing bucket for type %q", ErrStore, bk.typeName)
		}
		tx.bucketCache[bucketKey{bk.typeName, ""}] = top
	}
	if bk.sub == "" {
		return top, nil
	}

	tx.stats.Bucket.Get++
	b = top.Bucket([]byte(bk.sub))
	if b == nil {
		return nil, fmt.Errorf("%w: missing bucket %q for type %q", ErrStore, bk.sub, bk.typeName)
	}
	tx.bucketCache[bk] = b
	return b, nil
}

// typeBucket returns the top-level bucket for a type.
func (tx *Tx) typeBucket(typeName string) (*bolt.Bucket, error) {
	return tx.bucket(bucketKey{typeName, ""})
}

// recordsBucket returns the "records" subbucket for a type, setting its
// FillPercent for the expected write pattern (see typeVersion.fillPercent).
func (tx *Tx) recordsBucket(typeName string, fillPercent float64) (*bolt.Bucket, error) {
	b, err := tx.bucket(bucketKey{typeName, "records"})
	if err != nil {
		return nil, err
	}
	b.FillPercent = fillPercent
	return b, nil
}

// indexBucket returns the subbucket holding the keys of index idx.
func (tx *Tx) indexBucket(idx *index) (*bolt.Bucket, error) {
	return tx.bucket(bucketKey{idx.tv.name, "index." + idx.Name})
}
|
||||
|
||||
// Drop removes a type and its data from the database.
// If the type is currently registered, it is unregistered and no longer available.
// If a type is still referenced by another type, eg through a "ref" struct tag,
// ErrReference is returned.
// If the type does not exist, ErrAbsent is returned.
func (db *DB) Drop(name string) error {
	// NOTE(review): Write holds only a read-lock on db.typesMutex, yet the
	// code below mutates db.typeNames/db.types — confirm Drop is not meant
	// to run concurrently with other transactions/Register.
	return db.Write(func(tx *Tx) error {
		tx.stats.Bucket.Get++
		if tx.btx.Bucket([]byte(name)) == nil {
			return ErrAbsent
		}

		if st, ok := db.typeNames[name]; ok && len(st.Current.referencedBy) > 0 {
			return fmt.Errorf("%w: type is still referenced", ErrReference)
		} else if ok {
			// Unregister: remove this type's indices from the referencedBy
			// administration of every type it references.
			for ref := range st.Current.references {
				var n []*index
				for _, idx := range db.typeNames[ref].Current.referencedBy {
					if idx.tv != st.Current {
						n = append(n, idx)
					}
				}
				db.typeNames[ref].Current.referencedBy = n
			}
			delete(db.typeNames, name)
			delete(db.types, st.Type)
		}

		tx.stats.Bucket.Delete++
		return tx.btx.DeleteBucket([]byte(name))
	})
}
|
||||
|
||||
// The CRUD methods below are convenience wrappers that run the corresponding
// Tx method inside a fresh transaction.

// Delete calls Delete on a new writable Tx.
func (db *DB) Delete(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Delete(values...)
	})
}

// Get calls Get on a new read-only Tx.
func (db *DB) Get(values ...any) error {
	return db.Read(func(tx *Tx) error {
		return tx.Get(values...)
	})
}

// Insert calls Insert on a new writable Tx.
func (db *DB) Insert(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Insert(values...)
	})
}

// Update calls Update on a new writable Tx.
func (db *DB) Update(values ...any) error {
	return db.Write(func(tx *Tx) error {
		return tx.Update(values...)
	})
}
|
||||
|
||||
// typeKinds maps directly-supported reflect kinds to our kind. []byte,
// time.Time, encoding.BinaryMarshaler and plain structs are special-cased
// in typeKind.
var typeKinds = map[reflect.Kind]kind{
	reflect.Bool:    kindBool,
	reflect.Int:     kindInt,
	reflect.Int8:    kindInt8,
	reflect.Int16:   kindInt16,
	reflect.Int32:   kindInt32,
	reflect.Int64:   kindInt64,
	reflect.Uint:    kindUint,
	reflect.Uint8:   kindUint8,
	reflect.Uint16:  kindUint16,
	reflect.Uint32:  kindUint32,
	reflect.Uint64:  kindUint64,
	reflect.Float32: kindFloat32,
	reflect.Float64: kindFloat64,
	reflect.Map:     kindMap,
	reflect.Slice:   kindSlice,
	reflect.String:  kindString,
}

// typeKind returns the storage kind for Go type t, or an ErrType error for
// unsupported types. The order of checks matters: []byte before the generic
// slice case, time.Time and BinaryMarshaler before the generic struct case.
func typeKind(t reflect.Type) (kind, error) {
	if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
		return kindBytes, nil
	}

	k, ok := typeKinds[t.Kind()]
	if ok {
		return k, nil
	}

	if t == reflect.TypeOf(zerotime) {
		return kindTime, nil
	}

	// BinaryMarshaler may be implemented on the pointer receiver.
	if reflect.PointerTo(t).AssignableTo(reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()) {
		return kindBinaryMarshal, nil
	}

	if t.Kind() == reflect.Struct {
		return kindStruct, nil
	}
	return kind(0), fmt.Errorf("%w: unsupported type %v", ErrType, t)
}

// typeName returns the name under which struct type t is stored: its Go name,
// or the name from a "typename" struct tag on the first (primary key) field.
// note: assumes t is a struct with at least one field; t.Field(0) would panic
// otherwise — presumably callers pass validated types; confirm at call sites.
func typeName(t reflect.Type) (string, error) {
	tags, err := newStoreTags(t.Field(0).Tag.Get("bstore"), true)
	if err != nil {
		return "", err
	}
	if name, err := tags.Get("typename"); err != nil {
		return "", err
	} else if name != "" {
		return name, nil
	}
	return t.Name(), nil
}
|
||||
|
||||
// Get value for a key. For insert a next sequence may be generated for the
// primary key. Returns the packed key, the reflect value of the key field,
// and whether a fresh autoincrement sequence value was assigned.
func (tv typeVersion) keyValue(tx *Tx, rv reflect.Value, insert bool, rb *bolt.Bucket) ([]byte, reflect.Value, bool, error) {
	f := tv.Fields[0] // Fields[0] is always the primary key.
	krv := rv.FieldByIndex(f.structField.Index)
	var seq bool
	if krv.IsZero() {
		if !insert {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key can not be zero value", ErrParam)
		}
		if tv.Noauto {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key cannot be zero value without autoincrement", ErrParam)
		}
		// Autoincrement: take the next sequence from the records bucket.
		id, err := rb.NextSequence()
		if err != nil {
			return nil, reflect.Value{}, seq, fmt.Errorf("next primary key: %w", err)
		}
		switch f.Type.Kind {
		case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
			if krv.OverflowInt(int64(id)) {
				return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
			}
			krv.SetInt(int64(id))
		case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
			if krv.OverflowUint(id) {
				return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
			}
			krv.SetUint(id)
		default:
			// todo: should check this during register.
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: unsupported autoincrement primary key type %v", ErrZero, f.Type.Kind)
		}
		seq = true
	} else if !tv.Noauto && insert {
		// We let user insert their own ID for our own autoincrement
		// PK. But we update the internal next sequence if the users's
		// PK is highest yet, so a future autoincrement insert will succeed.
		switch f.Type.Kind {
		case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
			v := krv.Int()
			if v > 0 && uint64(v) > rb.Sequence() {
				if err := rb.SetSequence(uint64(v)); err != nil {
					return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
				}
			}
		case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
			v := krv.Uint()
			if v > rb.Sequence() {
				if err := rb.SetSequence(v); err != nil {
					return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
				}
			}
		}
	}

	k, err := packPK(krv)
	if err != nil {
		return nil, reflect.Value{}, seq, err
	}
	if seq {
		// Sanity check: a freshly assigned sequence value must not already exist.
		tx.stats.Records.Get++
		if rb.Get(k) != nil {
			return nil, reflect.Value{}, seq, fmt.Errorf("%w: internal error: next sequence value is already present", ErrUnique)
		}
	}
	return k, krv, seq, err
}
|
||||
|
||||
// Read calls function fn with a new read-only transaction, ensuring transaction rollback.
func (db *DB) Read(fn func(*Tx) error) error {
	// Hold a read lock on types so a concurrent Register cannot change
	// registered schemas during the transaction.
	db.typesMutex.RLock()
	defer db.typesMutex.RUnlock()
	return db.bdb.View(func(btx *bolt.Tx) error {
		tx := &Tx{db: db, btx: btx}
		tx.stats.Reads++
		defer tx.addStats() // Propagate tx statistics to the DB.
		return fn(tx)
	})
}

// Write calls function fn with a new read-write transaction. If fn returns
// nil, the transaction is committed. Otherwise the transaction is rolled back.
func (db *DB) Write(fn func(*Tx) error) error {
	db.typesMutex.RLock()
	defer db.typesMutex.RUnlock()
	return db.bdb.Update(func(btx *bolt.Tx) error {
		tx := &Tx{db: db, btx: btx}
		tx.stats.Writes++
		defer tx.addStats() // Propagate tx statistics to the DB.
		return fn(tx)
	})
}

// storeType returns the registered storeType for reflect type rt, or an
// ErrType error if rt was not registered.
func (db *DB) storeType(rt reflect.Type) (storeType, error) {
	st, ok := db.types[rt]
	if !ok {
		return storeType{}, fmt.Errorf("%w: %v", ErrType, rt)
	}
	return st, nil
}
|
||||
|
||||
// HintAppend sets a hint whether changes to the types indicated by each struct
|
||||
// from values is (mostly) append-only.
|
||||
//
|
||||
// This currently sets the BoltDB bucket FillPercentage to 1 for efficient use
|
||||
// of storage space.
|
||||
func (db *DB) HintAppend(append bool, values ...any) error {
|
||||
db.typesMutex.Lock()
|
||||
defer db.typesMutex.Unlock()
|
||||
for _, v := range values {
|
||||
t := reflect.TypeOf(v)
|
||||
st, err := db.storeType(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if append {
|
||||
st.Current.fillPercent = 1.0
|
||||
} else {
|
||||
st.Current.fillPercent = 0.5
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
69
vendor/github.com/mjl-/bstore/tags.go
generated
vendored
Normal file
69
vendor/github.com/mjl-/bstore/tags.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// storeTags is the list of comma-separated words parsed from a "bstore" struct tag.
type storeTags []string
|
||||
|
||||
func newStoreTags(tag string, isPK bool) (storeTags, error) {
|
||||
if tag == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
l := strings.Split(tag, ",")
|
||||
for _, s := range l {
|
||||
w := strings.SplitN(s, " ", 2)
|
||||
switch w[0] {
|
||||
case "noauto", "typename":
|
||||
if !isPK {
|
||||
return nil, fmt.Errorf("%w: cannot have tag %q for non-primary key", ErrType, w[0])
|
||||
}
|
||||
case "index", "unique", "default", "-":
|
||||
if isPK {
|
||||
return nil, fmt.Errorf("%w: cannot have tag %q on primary key", ErrType, w[0])
|
||||
}
|
||||
case "name", "nonzero", "ref":
|
||||
default:
|
||||
return nil, fmt.Errorf("%w: unknown store tag %q", ErrType, w[0])
|
||||
}
|
||||
}
|
||||
return storeTags(l), nil
|
||||
}
|
||||
|
||||
func (t storeTags) Has(word string) bool {
|
||||
for _, s := range t {
|
||||
if s == word {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t storeTags) Get(word string) (string, error) {
|
||||
wordsp := word + " "
|
||||
for _, s := range t {
|
||||
if strings.HasPrefix(s, wordsp) {
|
||||
r := s[len(wordsp):]
|
||||
if r == "" {
|
||||
return "", fmt.Errorf("%w: bstore word %q requires non-empty parameter", ErrType, word)
|
||||
}
|
||||
return r, nil
|
||||
} else if s == word {
|
||||
return "", fmt.Errorf("%w: bstore word %q requires argument", ErrType, word)
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (t storeTags) List(word string) []string {
|
||||
var l []string
|
||||
wordsp := word + " "
|
||||
for _, s := range t {
|
||||
if strings.HasPrefix(s, wordsp) {
|
||||
l = append(l, s[len(wordsp):])
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
438
vendor/github.com/mjl-/bstore/tx.go
generated
vendored
Normal file
438
vendor/github.com/mjl-/bstore/tx.go
generated
vendored
Normal file
@ -0,0 +1,438 @@
|
||||
package bstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// structptr returns the dereferenced struct for value, which must be a
// non-nil pointer to a struct; ErrParam otherwise.
func (tx *Tx) structptr(value any) (reflect.Value, error) {
	rv := reflect.ValueOf(value)
	if !rv.IsValid() || rv.Kind() != reflect.Ptr || !rv.Elem().IsValid() || rv.Type().Elem().Kind() != reflect.Struct {
		return reflect.Value{}, fmt.Errorf("%w: value must be non-nil pointer to a struct, is %T", ErrParam, value)
	}
	rv = rv.Elem()
	return rv, nil
}

// structOrStructptr returns the struct for value, which must be a struct or a
// non-nil pointer to a struct; ErrParam otherwise.
func (tx *Tx) structOrStructptr(value any) (reflect.Value, error) {
	rv := reflect.ValueOf(value)
	if !rv.IsValid() {
		return reflect.Value{}, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam)
	}
	if rv.Kind() == reflect.Ptr {
		rv = rv.Elem()
		if !rv.IsValid() {
			return rv, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam)
		}
	}
	if rv.Kind() != reflect.Struct {
		return reflect.Value{}, fmt.Errorf("%w: value must be a struct or pointer to a struct, is %T", ErrParam, value)
	}
	return rv, nil
}
|
||||
|
||||
// update indices by comparing indexed fields of the ov (old) and v (new). Only if
// the fields changed will the index be updated. Either ov or v may be the
// reflect.Value zero value, indicating there is no old/new value and the index
// should be updated.
func (tx *Tx) updateIndices(tv *typeVersion, pk []byte, ov, v reflect.Value) error {

	// changed reports whether any field covered by idx differs between ov and v.
	changed := func(idx *index) bool {
		for _, f := range idx.Fields {
			rofv := ov.FieldByIndex(f.structField.Index)
			nofv := v.FieldByIndex(f.structField.Index)
			// note: checking the interface values is enough, we only allow comparable types as index fields.
			if rofv.Interface() != nofv.Interface() {
				return true
			}
		}
		return false
	}

	for _, idx := range tv.Indices {
		var add, remove bool
		if !ov.IsValid() {
			add = true // Insert: no old value.
		} else if !v.IsValid() {
			remove = true // Delete: no new value.
		} else if !changed(idx) {
			continue // Update that does not touch this index.
		} else {
			add, remove = true, true // Update of an indexed field: replace the key.
		}

		ib, err := tx.indexBucket(idx)
		if err != nil {
			return err
		}
		if remove {
			_, ik, err := idx.packKey(ov, pk)
			if err != nil {
				return err
			}
			tx.stats.Index.Delete++
			if sanityChecks {
				tx.stats.Index.Get++
				if ib.Get(ik) == nil {
					return fmt.Errorf("internal error: key missing from index")
				}
			}
			if err := ib.Delete(ik); err != nil {
				return fmt.Errorf("%w: removing from index: %s", ErrStore, err)
			}
		}
		if add {
			prek, ik, err := idx.packKey(v, pk)
			if err != nil {
				return err
			}
			if idx.Unique {
				// Any existing key with the indexed fields as prefix (whatever
				// pk suffix) violates uniqueness.
				tx.stats.Index.Cursor++
				if xk, _ := ib.Cursor().Seek(prek); xk != nil && bytes.HasPrefix(xk, prek) {
					return fmt.Errorf("%w: %q", ErrUnique, idx.Name)
				}
			}

			tx.stats.Index.Put++
			if err := ib.Put(ik, []byte{}); err != nil {
				return fmt.Errorf("inserting into index: %w", err)
			}
		}
	}
	return nil
}
|
||||
|
||||
// checkReferences verifies that the value of each "ref" field in rv exists as
// a primary key in the referenced type's records. Zero values (no reference)
// are skipped, as are fields whose value is unchanged from the old value ov
// (when ov is valid).
func (tx *Tx) checkReferences(tv *typeVersion, pk []byte, ov, rv reflect.Value) error {
	for _, f := range tv.Fields {
		if len(f.References) == 0 {
			continue
		}
		frv := rv.FieldByIndex(f.structField.Index)
		if frv.IsZero() || (ov.IsValid() && ov.FieldByIndex(f.structField.Index).Interface() == frv.Interface()) {
			continue
		}
		k, err := packPK(frv)
		if err != nil {
			return err
		}
		for _, name := range f.References {
			// NOTE(review): tv.fillPercent (of the referencing type) is passed
			// for the referenced type's records bucket — harmless for this
			// read-only Get, but confirm it is intentional. Also note this Get
			// is not counted in tx.stats.Records.
			rb, err := tx.recordsBucket(name, tv.fillPercent)
			if err != nil {
				return err
			}
			if rb.Get(k) == nil {
				return fmt.Errorf("%w: value %v from field %q to %q", ErrReference, frv.Interface(), f.Name, name)
			}
		}
	}
	return nil
}
|
||||
|
||||
func (tx *Tx) addStats() {
|
||||
tx.db.statsMutex.Lock()
|
||||
tx.db.stats.add(tx.stats)
|
||||
tx.db.statsMutex.Unlock()
|
||||
tx.stats = Stats{}
|
||||
}
|
||||
|
||||
// Get fetches records by their primary key from the database. Each value must
// be a pointer to a struct.
//
// ErrAbsent is returned if the record does not exist.
func (tx *Tx) Get(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}

	for _, value := range values {
		tx.stats.Get++
		rv, err := tx.structptr(value)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
		if err != nil {
			return err
		}
		// false: never assign a new sequence on Get, the key must be set already.
		k, _, _, err := st.Current.keyValue(tx, rv, false, rb)
		if err != nil {
			return err
		}
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		// Decode the stored representation into the caller's struct.
		if err := st.parse(rv, bv); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// Delete removes values by their primary key from the database. Each value
// must be a struct or pointer to a struct. Indices are automatically updated
// and referential integrity is maintained.
//
// ErrAbsent is returned if the record does not exist.
// ErrReference is returned if another record still references this record.
func (tx *Tx) Delete(values ...any) error {
	if tx.db == nil {
		return errTxClosed
	}

	for _, value := range values {
		tx.stats.Delete++
		rv, err := tx.structOrStructptr(value)
		if err != nil {
			return err
		}
		st, err := tx.db.storeType(rv.Type())
		if err != nil {
			return err
		}
		rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
		if err != nil {
			return err
		}
		k, _, _, err := st.Current.keyValue(tx, rv, false, rb)
		if err != nil {
			return err
		}
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		// Parse the stored value: index removal needs the old field values.
		rov, err := st.parseNew(k, bv)
		if err != nil {
			return fmt.Errorf("parsing current value: %w", err)
		}
		if err := tx.delete(rb, st, k, rov); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// delete removes the record with key k from the records bucket rb and from all
// indices, after verifying that no other record still references it. rov is
// the previously stored (old) value.
func (tx *Tx) delete(rb *bolt.Bucket, st storeType, k []byte, rov reflect.Value) error {
	// Check that anyone referencing this type does not reference this record.
	for _, refBy := range st.Current.referencedBy {
		if ib, err := tx.indexBucket(refBy); err != nil {
			return err
		} else {
			tx.stats.Index.Cursor++
			// Keys in the referencing index are prefixed with this primary key.
			if xk, _ := ib.Cursor().Seek(k); xk != nil && bytes.HasPrefix(xk, k) {
				return fmt.Errorf("%w: index %q", ErrReference, refBy.Name)
			}
		}
	}

	// Delete value from indices.
	if err := tx.updateIndices(st.Current, k, rov, reflect.Value{}); err != nil {
		return fmt.Errorf("removing from indices: %w", err)
	}

	tx.stats.Records.Delete++
	return rb.Delete(k)
}
|
||||
|
||||
// Update updates records represented by values by their primary keys into the
|
||||
// database. Each value must be a pointer to a struct. Indices are
|
||||
// automatically updated.
|
||||
//
|
||||
// ErrAbsent is returned if the record does not exist.
|
||||
func (tx *Tx) Update(values ...any) error {
|
||||
if tx.db == nil {
|
||||
return errTxClosed
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
tx.stats.Update++
|
||||
rv, err := tx.structptr(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
st, err := tx.db.storeType(rv.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.put(st, rv, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Insert inserts values as new records into the database. Each value must be a
|
||||
// pointer to a struct. If the primary key field is zero and autoincrement is not
|
||||
// disabled, the next sequence is assigned. Indices are automatically updated.
|
||||
//
|
||||
// ErrUnique is returned if the record already exists.
|
||||
// ErrSeq is returned if no next autoincrement integer is available.
|
||||
// ErrZero is returned if a nonzero constraint would be violated.
|
||||
// ErrReference is returned if another record is referenced that does not exist.
|
||||
func (tx *Tx) Insert(values ...any) error {
|
||||
if tx.db == nil {
|
||||
return errTxClosed
|
||||
}
|
||||
|
||||
for _, value := range values {
|
||||
tx.stats.Insert++
|
||||
rv, err := tx.structptr(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
st, err := tx.db.storeType(rv.Type())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := st.Current.applyDefault(rv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tx.put(st, rv, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// put stores rv in the database. With insert true, a new record is added and
// must not exist yet; otherwise an existing record is replaced and must exist.
func (tx *Tx) put(st storeType, rv reflect.Value, insert bool) error {
	rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
	if err != nil {
		return err
	}
	// seq indicates a new autoincrement sequence was assigned to the key in krv.
	k, krv, seq, err := st.Current.keyValue(tx, rv, insert, rb)
	if err != nil {
		return err
	}
	if insert {
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv != nil {
			return fmt.Errorf("%w: record already exists", ErrUnique)
		}
		err := tx.insert(rb, st, rv, krv, k)
		if err != nil && seq {
			// Zero out the generated sequence.
			krv.Set(reflect.Zero(krv.Type()))
		}
		return err
	} else {
		tx.stats.Records.Get++
		bv := rb.Get(k)
		if bv == nil {
			return ErrAbsent
		}
		// Parse the old value so indices and references can be diffed against it.
		ov, err := st.parseNew(k, bv)
		if err != nil {
			return fmt.Errorf("parsing current value: %w", err)
		}
		return tx.update(rb, st, rv, ov, k)
	}
}
|
||||
|
||||
// insert adds a new record with key k: it packs rv, checks its references,
// adds index entries, writes the record, and finally sets the primary key
// field on the caller's struct (krv may hold a freshly assigned sequence).
func (tx *Tx) insert(rb *bolt.Bucket, st storeType, rv, krv reflect.Value, k []byte) error {
	v, err := st.pack(rv)
	if err != nil {
		return err
	}
	// Zero reflect.Value for the old value: this is a fresh insert.
	if err := tx.checkReferences(st.Current, k, reflect.Value{}, rv); err != nil {
		return err
	}
	if err := tx.updateIndices(st.Current, k, reflect.Value{}, rv); err != nil {
		return fmt.Errorf("updating indices for inserted value: %w", err)
	}
	tx.stats.Records.Put++
	if err := rb.Put(k, v); err != nil {
		return err
	}
	// Field 0 is the primary key by convention; make the assigned key visible.
	rv.Field(0).Set(krv)
	return nil
}
|
||||
|
||||
// update replaces the record at key k with rv. rov is the previously stored
// value, used to skip writes when nothing changed and to diff references and
// index entries.
func (tx *Tx) update(rb *bolt.Bucket, st storeType, rv, rov reflect.Value, k []byte) error {
	// No-op update: avoid touching records and indices entirely.
	if st.Current.equal(rov, rv) {
		return nil
	}

	v, err := st.pack(rv)
	if err != nil {
		return err
	}
	if err := tx.checkReferences(st.Current, k, rov, rv); err != nil {
		return err
	}
	if err := tx.updateIndices(st.Current, k, rov, rv); err != nil {
		return fmt.Errorf("updating indices for updated record: %w", err)
	}
	tx.stats.Records.Put++
	return rb.Put(k, v)
}
|
||||
|
||||
// Begin starts a transaction.
//
// If writable is true, the transaction allows modifications. Only one writable
// transaction can be active at a time on a DB. No read-only transactions can be
// active at the same time. Attempting to begin a read-only transaction from a
// writable transaction leads to deadlock.
//
// A writable Tx can be committed or rolled back. A read-only transaction must
// always be rolled back.
func (db *DB) Begin(writable bool) (*Tx, error) {
	btx, err := db.bdb.Begin(writable)
	if err != nil {
		return nil, err
	}
	// Hold a read lock on the registered types for the lifetime of the
	// transaction; released again in Commit/Rollback.
	db.typesMutex.RLock()
	tx := &Tx{db: db, btx: btx}
	if writable {
		tx.stats.Writes++
	} else {
		tx.stats.Reads++
	}
	return tx, nil
}
|
||||
|
||||
// Rollback aborts and cancels any changes made in this transaction.
// Statistics are added to its DB.
func (tx *Tx) Rollback() error {
	if tx.db == nil {
		return errTxClosed
	}

	tx.addStats()
	// Release the types read lock taken in Begin.
	tx.db.typesMutex.RUnlock()
	err := tx.btx.Rollback()
	// Mark the transaction closed; further use returns errTxClosed.
	tx.db = nil
	return err
}
|
||||
|
||||
// Commit commits changes made in the transaction to the database.
// Statistics are added to its DB.
func (tx *Tx) Commit() error {
	if tx.db == nil {
		return errTxClosed
	}

	tx.addStats()
	// Release the types read lock taken in Begin.
	tx.db.typesMutex.RUnlock()
	err := tx.btx.Commit()
	if err != nil {
		tx.btx.Rollback() // Nothing to do for error.
	}
	// Mark the transaction closed; further use returns errTxClosed.
	tx.db = nil
	return err
}
|
2
vendor/github.com/mjl-/sconf/.gitignore
generated
vendored
Normal file
2
vendor/github.com/mjl-/sconf/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
/cmd/sconfexample/sconfexample
|
||||
/cover.*
|
7
vendor/github.com/mjl-/sconf/LICENSE
generated
vendored
Normal file
7
vendor/github.com/mjl-/sconf/LICENSE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2019 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
12
vendor/github.com/mjl-/sconf/Makefile
generated
vendored
Normal file
12
vendor/github.com/mjl-/sconf/Makefile
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
build:
|
||||
go build ./...
|
||||
go vet ./...
|
||||
GOARCH=386 go vet ./...
|
||||
staticcheck ./...
|
||||
|
||||
fmt:
|
||||
gofmt -w -s *.go cmd/*/*.go
|
||||
|
||||
test:
|
||||
go test -shuffle=on -coverprofile cover.out
|
||||
go tool cover -html=cover.out -o cover.html
|
6
vendor/github.com/mjl-/sconf/README.txt
generated
vendored
Normal file
6
vendor/github.com/mjl-/sconf/README.txt
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
sconf - simple config files
|
||||
|
||||
See https://godoc.org/github.com/mjl-/sconf for documentation.
|
||||
|
||||
# todo
|
||||
- deal better with unexpected types. need to use canset?
|
264
vendor/github.com/mjl-/sconf/describe.go
generated
vendored
Normal file
264
vendor/github.com/mjl-/sconf/describe.go
generated
vendored
Normal file
@ -0,0 +1,264 @@
|
||||
package sconf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/xfmt"
|
||||
)
|
||||
|
||||
// errNoElem is raised when describing an empty slice while zero values are not
// kept: there is no element to derive an example from.
var errNoElem = errors.New("no elements")

// writeError wraps errors raised while describing, for recognition in recover.
type writeError struct{ error }

// writer holds the state for writing an sconf description of a value.
type writer struct {
	out      *bufio.Writer
	prefix   string // Current indentation, one tab per level.
	keepZero bool   // If set, we also write zero values.
	docs     bool   // If set, we write comments.
}

// error aborts describing with err; recovered at the top level.
func (w *writer) error(err error) {
	panic(writeError{err})
}

// check aborts describing if err is not nil.
func (w *writer) check(err error) {
	if err != nil {
		w.error(err)
	}
}

// write writes s to the buffered output, aborting on failure.
func (w *writer) write(s string) {
	_, err := w.out.WriteString(s)
	w.check(err)
}

// flush writes any buffered output, aborting on failure.
func (w *writer) flush() {
	err := w.out.Flush()
	w.check(err)
}

// indent adds one tab of indentation.
func (w *writer) indent() {
	w.prefix += "\t"
}

// unindent removes one tab of indentation.
func (w *writer) unindent() {
	w.prefix = w.prefix[:len(w.prefix)-1]
}
|
||||
|
||||
// isOptional reports whether the sconf struct tag marks a field as optional.
func isOptional(sconfTag string) bool {
	return hasTagWord(sconfTag, "optional")
}

// isIgnore reports whether the sconf struct tag marks a field as ignored.
func isIgnore(sconfTag string) bool {
	return hasTagWord(sconfTag, "-") || hasTagWord(sconfTag, "ignore")
}

// hasTagWord reports whether word occurs as one of the comma-separated
// elements of sconfTag.
func hasTagWord(sconfTag, word string) bool {
	for _, w := range strings.Split(sconfTag, ",") {
		if w == word {
			return true
		}
	}
	return false
}
|
||||
|
||||
// describeMap writes a map as "key: value" lines, with keys sorted for stable
// output. Map keys must be strings. An empty map gets an example entry with
// key "x" and a zero value of the element type.
func (w *writer) describeMap(v reflect.Value) {
	t := v.Type()
	if t.Key().Kind() != reflect.String {
		w.error(fmt.Errorf("map key must be string"))
	}
	keys := v.MapKeys()
	sort.Slice(keys, func(i, j int) bool {
		return keys[i].String() < keys[j].String()
	})
	have := false
	for _, k := range keys {
		have = true
		w.write(w.prefix)
		w.write(k.String() + ":")
		mv := v.MapIndex(k)
		// A struct that would write nothing is represented by the explicit "nil".
		if !w.keepZero && mv.Kind() == reflect.Struct && isEmptyStruct(mv) {
			w.write(" nil\n")
			continue
		}
		w.describeValue(mv)
	}
	if have {
		return
	}
	// Empty map: show an example entry so the output demonstrates the format.
	w.write(w.prefix)
	w.write("x:")
	w.describeValue(reflect.Zero(t.Elem()))
}
|
||||
|
||||
// whether v is a zero value of a struct type with all fields optional or
// ignored, causing it to write nothing when using Write.
func isEmptyStruct(v reflect.Value) bool {
	if v.Kind() != reflect.Struct {
		panic("not a struct")
	}
	t := v.Type()
	n := t.NumField()
	for i := 0; i < n; i++ {
		ft := t.Field(i)
		tag := ft.Tag.Get("sconf")
		if isIgnore(tag) {
			continue
		}
		// A required field is always written, so the struct cannot be empty.
		if !isOptional(tag) {
			return false
		}
		if !isZeroIgnored(v.Field(i)) {
			return false
		}
	}
	return true
}

// whether v is zero, taking ignored values into account.
func isZeroIgnored(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Slice, reflect.Map:
		return v.Len() == 0
	case reflect.Ptr:
		// A nil pointer, or a pointer to an (ignored-)zero value.
		return v.IsZero() || isZeroIgnored(v.Elem())
	case reflect.Struct:
		// A struct is zero if all its non-ignored fields are zero.
		t := v.Type()
		n := t.NumField()
		for i := 0; i < n; i++ {
			ft := t.Field(i)
			tag := ft.Tag.Get("sconf")
			if isIgnore(tag) {
				continue
			}
			if !isZeroIgnored(v.Field(i)) {
				return false
			}
		}
		return true
	default:
		return v.IsZero()
	}
}
|
||||
|
||||
// describeStruct writes all fields of struct v as "Name: value" lines,
// skipping ignored fields and (unless keepZero) zero-valued optional fields.
// With docs enabled, "sconf-doc" tags and an "(optional)" marker are written
// as wrapped comment lines above the field.
func (w *writer) describeStruct(v reflect.Value) {
	t := v.Type()
	n := t.NumField()
	for i := 0; i < n; i++ {
		f := t.Field(i)
		fv := v.Field(i)
		if isIgnore(f.Tag.Get("sconf")) {
			continue
		}
		if !w.keepZero && isOptional(f.Tag.Get("sconf")) && isZeroIgnored(fv) {
			continue
		}
		if w.docs {
			doc := f.Tag.Get("sconf-doc")
			optional := isOptional(f.Tag.Get("sconf"))
			if doc != "" || optional {
				s := "\n" + w.prefix + "# " + doc
				if optional {
					opt := "(optional)"
					if doc != "" {
						opt = " " + opt
					}
					s += opt
				}
				s += "\n"
				// Wrap long comment lines at 80 columns.
				b := &strings.Builder{}
				err := xfmt.Format(b, strings.NewReader(s), xfmt.Config{MaxWidth: 80})
				w.check(err)
				w.write(b.String())
			}
		}
		w.write(w.prefix)
		w.write(f.Name + ":")
		w.describeValue(fv)
	}
}
|
||||
|
||||
// describeValue writes a single value. Basic types go on the current line
// after a space; slices, structs and maps start a new indented block.
func (w *writer) describeValue(v reflect.Value) {
	t := v.Type()
	i := v.Interface()

	// time.Duration gets its natural string form, not the underlying int64.
	if t == durationType {
		w.write(fmt.Sprintf(" %s\n", i))
		return
	}

	switch t.Kind() {
	default:
		w.error(fmt.Errorf("unsupported value %v", t.Kind()))
		return

	case reflect.Bool:
		w.write(fmt.Sprintf(" %v\n", i))

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		w.write(fmt.Sprintf(" %d\n", i))

	case reflect.Float32, reflect.Float64:
		w.write(fmt.Sprintf(" %f\n", i))

	case reflect.String:
		// The format has no escaping, so multiline strings cannot be represented.
		if strings.Contains(v.String(), "\n") {
			w.error(fmt.Errorf("unsupported multiline string"))
		}
		w.write(fmt.Sprintf(" %s\n", i))

	case reflect.Slice:
		w.write("\n")
		w.indent()
		w.describeSlice(v)
		w.unindent()

	case reflect.Ptr:
		// A nil pointer is described through a zero value of the pointed-to type.
		var pv reflect.Value
		if v.IsNil() {
			pv = reflect.New(t.Elem()).Elem()
		} else {
			pv = v.Elem()
		}
		w.describeValue(pv)

	case reflect.Struct:
		w.write("\n")
		w.indent()
		w.describeStruct(v)
		w.unindent()

	case reflect.Map:
		w.write("\n")
		w.indent()
		w.describeMap(v)
		w.unindent()
	}
}
|
||||
|
||||
// describeSlice writes each element of v as a "-" item. An empty slice writes
// one example element when keepZero is set, and aborts with errNoElem
// otherwise: there is no element to derive an example from.
func (w *writer) describeSlice(v reflect.Value) {
	describeElem := func(vv reflect.Value) {
		w.write(w.prefix)
		w.write("-")
		w.describeValue(vv)
	}

	n := v.Len()
	if n == 0 {
		if w.keepZero {
			describeElem(reflect.New(v.Type().Elem()))
		} else {
			w.error(errNoElem)
		}
	}

	for i := 0; i < n; i++ {
		describeElem(v.Index(i))
	}
}
|
106
vendor/github.com/mjl-/sconf/doc.go
generated
vendored
Normal file
106
vendor/github.com/mjl-/sconf/doc.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
/*
|
||||
Package sconf parses simple configuration files and generates commented example config files.
|
||||
|
||||
Sconf is the name of this package and of the config file format. The file format
|
||||
is inspired by JSON and yaml, but easier to write and use correctly.
|
||||
|
||||
Sconf goals:
|
||||
|
||||
- Make the application self-documenting about its configuration requirements.
|
||||
- Require full configuration of an application via a config file, finding
|
||||
mistakes by the operator.
|
||||
- Make it easy to write a correct config file, no surprises.
|
||||
|
||||
Workflow for using this package:
|
||||
|
||||
- Write a Go struct with the config for your application.
|
||||
- Simply parse a config into that struct with Parse() or ParseFile().
|
||||
- Write out an example config file with all fields that need to be set with
|
||||
Describe(), and associated comments that you configured in struct tags.
|
||||
|
||||
Features of sconf as file format:
|
||||
|
||||
- Types similar to JSON, mapping naturally to types in programming languages.
|
||||
- Requires far fewer type-describing tokens. no "" for map keys, strings don't
|
||||
require "", no [] for arrays or {} for maps (like in JSON). Sconf uses the Go
|
||||
types to guide parsing the config.
|
||||
- Can have comments (JSON cannot).
|
||||
- Is simple, does not allow all kinds of syntaxes you would not ever want to use.
|
||||
- Uses indenting for nested structures (with the indent character).
|
||||
|
||||
An example config file:
|
||||
|
||||
# comment for stringKey (optional)
|
||||
StringKey: value1
|
||||
IntKey: 123
|
||||
BoolKey: true
|
||||
Struct:
|
||||
# this is the A-field
|
||||
A: 321
|
||||
B: true
|
||||
# (optional)
|
||||
C: this is text
|
||||
StringArray:
|
||||
- blah
|
||||
- blah
|
||||
# nested structs work just as well
|
||||
Nested:
|
||||
-
|
||||
A: 1
|
||||
B: false
|
||||
C: hoi
|
||||
-
|
||||
A: -1
|
||||
B: true
|
||||
C: hallo
|
||||
|
||||
The top-level is always a map, typically parsed into a Go struct. Maps start
|
||||
with a key, followed by a colon, followed by a value. Basic values like
|
||||
strings, ints, bools run to the end of the line. The leading space after a
|
||||
colon or dash is removed. Other values like maps and lists start on a new line,
|
||||
with an additional level of indenting. List values start with a dash. Empty
|
||||
lines are allowed. Multiline strings are not possible. Strings do not have
|
||||
escaped characters.
|
||||
|
||||
And the struct that generated this:
|
||||
|
||||
var config struct {
|
||||
StringKey string `sconf-doc:"comment for stringKey" sconf:"optional"`
|
||||
IntKey int64
|
||||
BoolKey bool
|
||||
Struct struct {
|
||||
A int `sconf-doc:"this is the A-field"`
|
||||
B bool
|
||||
C string `sconf:"optional"`
|
||||
}
|
||||
StringArray []string
|
||||
Nested []struct {
|
||||
A int
|
||||
B bool
|
||||
C string
|
||||
} `sconf-doc:"nested structs work just as well"`
|
||||
}
|
||||
|
||||
See cmd/sconfexample/main.go for more details.
|
||||
|
||||
In practice, you will mostly have nested maps:
|
||||
|
||||
Database:
|
||||
Host: localhost
|
||||
DBName: myapp
|
||||
User: myuser
|
||||
Mail:
|
||||
SMTP:
|
||||
TLS: true
|
||||
Host: mail.example.org
|
||||
|
||||
Sconf only parses config files. It does not deal with command-line flags or
|
||||
environment variables. Flags and environment variables are too limiting in data
|
||||
types. Especially environment variables are error prone: Applications typically
|
||||
have default values they fall back to, so will not notice typo's or unrecognized
|
||||
variables. Config files also have the nice property of being easy to diff, copy
|
||||
around, store in a VCS. In practice, command-line flags and environment
|
||||
variables are commonly stored in config files. Sconf goes straight to the config
|
||||
files.
|
||||
*/
|
||||
package sconf
|
308
vendor/github.com/mjl-/sconf/parse.go
generated
vendored
Normal file
308
vendor/github.com/mjl-/sconf/parse.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
|
||||
package sconf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// parser parses an sconf file line by line, tracking the expected
// indentation.
type parser struct {
	prefix     string        // indented string
	input      *bufio.Reader // for reading lines at a time
	line       string        // last read line
	linenumber int
}

// parseError wraps errors raised during parsing, for recognition in recover.
type parseError struct {
	err error
}
|
||||
|
||||
// parse reads an sconf file from src into dst, which must be a pointer to a
// struct. path is only used in error messages. Parse errors inside the
// parser are raised with panic(parseError) and converted back to a regular
// error, annotated with path and line number, in the deferred recover.
func parse(path string, src io.Reader, dst interface{}) (err error) {
	p := &parser{
		input: bufio.NewReader(src),
	}
	defer func() {
		x := recover()
		if x == nil {
			return
		}
		perr, ok := x.(parseError)
		if ok {
			err = fmt.Errorf("%s:%d: %v", path, p.linenumber, perr.err)
			return
		}
		// Not one of our errors, propagate the panic.
		panic(x)
	}()
	v := reflect.ValueOf(dst)
	if v.Kind() != reflect.Ptr {
		p.stop("destination not a pointer")
	}
	p.parseStruct0(v.Elem())
	return
}
|
||||
|
||||
// stop aborts parsing with a message; recovered in parse.
func (p *parser) stop(err string) {
	panic(parseError{errors.New(err)})
}

// check aborts parsing if err is not nil, prefixing the message with action.
func (p *parser) check(err error, action string) {
	if err != nil {
		p.stop(fmt.Sprintf("%s: %s", action, err))
	}
}

// string returns the current line without consuming it.
func (p *parser) string() string {
	return p.line
}

// leave replaces the current line, e.g. with the remainder after a key.
func (p *parser) leave(s string) {
	p.line = s
}

// consume returns the current line and marks it as processed.
func (p *parser) consume() string {
	s := p.line
	p.line = ""
	return s
}
|
||||
|
||||
// Next returns whether the next line is properly indented, reading data as necessary.
func (p *parser) next() bool {
	for p.line == "" {
		s, err := p.input.ReadString('\n')
		if s == "" {
			if err == io.EOF {
				return false
			}
			p.stop(err.Error())
		}
		p.linenumber++
		// Lines holding only a comment are skipped entirely.
		if strings.HasPrefix(strings.TrimSpace(s), "#") {
			continue
		}
		p.line = strings.TrimSuffix(s, "\n")
	}

	// Less indenting than expected. Let caller stop, returning to its caller for lower-level indent.
	r := strings.HasPrefix(p.line, p.prefix)
	return r
}
|
||||
|
||||
// indent increases the expected indentation by one tab and requires a
// properly indented next line.
func (p *parser) indent() {
	p.prefix += "\t"
	if !p.next() {
		p.stop("expected indent")
	}
}

// unindent drops one tab of expected indentation.
func (p *parser) unindent() {
	p.prefix = p.prefix[1:]
}
|
||||
|
||||
// durationType is used to recognize time.Duration values, which are parsed
// with time.ParseDuration instead of as a plain integer.
var durationType = reflect.TypeOf(time.Duration(0))

// parseValue parses one value of v's type from the remainder of the current
// line (basic types) or from the following indented lines (slices, structs,
// maps), and returns the (possibly replaced) value.
func (p *parser) parseValue(v reflect.Value) reflect.Value {
	t := v.Type()

	if t == durationType {
		s := p.consume()
		d, err := time.ParseDuration(s)
		p.check(err, "parsing duration")
		v.Set(reflect.ValueOf(d))
		return v
	}

	switch t.Kind() {
	default:
		p.stop(fmt.Sprintf("cannot parse type %v", t.Kind()))

	case reflect.Bool:
		// Only the exact literals "true" and "false" are accepted.
		s := p.consume()
		switch s {
		case "false":
			v.SetBool(false)
		case "true":
			v.SetBool(true)
		default:
			p.stop(fmt.Sprintf("bad boolean value %q", s))
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		s := p.consume()
		x, err := strconv.ParseInt(s, 10, 64)
		p.check(err, "parsing integer")
		v.SetInt(x)

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		s := p.consume()
		x, err := strconv.ParseUint(s, 10, 64)
		p.check(err, "parsing integer")
		v.SetUint(x)

	case reflect.Float32, reflect.Float64:
		s := p.consume()
		x, err := strconv.ParseFloat(s, 64)
		p.check(err, "parsing float")
		v.SetFloat(x)

	case reflect.String:
		v.SetString(p.consume())

	case reflect.Slice:
		v = p.parseSlice(v)

	case reflect.Ptr:
		// Allocate a value and parse into it.
		vv := reflect.New(t.Elem())
		p.parseValue(vv.Elem())
		v.Set(vv)

	case reflect.Struct:
		p.parseStruct(v)

	case reflect.Map:
		v = reflect.MakeMap(t)
		p.parseMap(v)
	}
	return v
}
|
||||
|
||||
// parseSlice parses a slice value. []byte is special-cased as a base64 string
// on the current line; other element types are parsed from following indented
// "-" items.
func (p *parser) parseSlice(v reflect.Value) reflect.Value {
	if v.Type().Elem().Kind() == reflect.Uint8 {
		s := p.consume()
		buf, err := base64.StdEncoding.DecodeString(s)
		p.check(err, "parsing base64")
		v.SetBytes(buf)
		return v
	}

	p.indent()
	defer p.unindent()
	return p.parseSlice0(v)
}

// parseSlice0 parses list items at the current indent level, each introduced
// by "-", and appends them to v.
func (p *parser) parseSlice0(v reflect.Value) reflect.Value {
	for p.next() {
		s := p.string()
		prefix := p.prefix + "-"
		if !strings.HasPrefix(s, prefix) {
			p.stop(fmt.Sprintf("expected item, prefix %q, saw %q", prefix, s))
		}
		s = s[len(prefix):]
		if s != "" {
			if !strings.HasPrefix(s, " ") {
				p.stop("missing space after -")
			}
			s = s[1:]
		}
		// Hand the remainder of the line to the element's value parser.
		p.leave(s)
		vv := reflect.New(v.Type().Elem()).Elem()
		vv = p.parseValue(vv)
		v = reflect.Append(v, vv)
	}
	return v
}
|
||||
|
||||
// parseStruct parses an indented block of "key: value" lines into struct v.
func (p *parser) parseStruct(v reflect.Value) {
	p.indent()
	defer p.unindent()
	p.parseStruct0(v)
}

// parseStruct0 parses "key: value" lines at the current indent level into
// fields of struct v. Keys must be unique, known, and not marked with the
// ignore tag. Afterwards, all required (non-optional, non-ignored) fields
// must have been seen.
func (p *parser) parseStruct0(v reflect.Value) {
	seen := map[string]struct{}{}
	var zeroValue reflect.Value
	t := v.Type()
	for p.next() {
		s := p.string()
		s = s[len(p.prefix):]
		l := strings.SplitN(s, ":", 2)
		if len(l) != 2 {
			p.stop("missing key: value")
		}
		k := l[0]
		if k == "" {
			p.stop("empty key")
		}
		if _, ok := seen[k]; ok {
			p.stop("duplicate key")
		}
		seen[k] = struct{}{}
		s = l[1]
		if s != "" && !strings.HasPrefix(s, " ") {
			p.stop("no space after colon")
		}
		if s != "" {
			s = s[1:]
		}
		// Hand the remainder of the line to the field's value parser.
		p.leave(s)

		vv := v.FieldByName(k)
		if vv == zeroValue {
			p.stop(fmt.Sprintf("unknown key %q", k))
		}
		if ft, _ := t.FieldByName(k); isIgnore(ft.Tag.Get("sconf")) {
			p.stop(fmt.Sprintf("unknown key %q (has ignore tag)", k))
		}
		vv.Set(p.parseValue(vv))
	}

	// Every field that is not optional or ignored must have been set.
	n := t.NumField()
	for i := 0; i < n; i++ {
		f := t.Field(i)
		if isIgnore(f.Tag.Get("sconf")) || isOptional(f.Tag.Get("sconf")) {
			continue
		}
		if _, ok := seen[f.Name]; !ok {
			p.stop(fmt.Sprintf("missing required key %q", f.Name))
		}
	}
}
|
||||
|
||||
// parseMap parses an indented block of "key: value" lines into map v.
func (p *parser) parseMap(v reflect.Value) {
	p.indent()
	defer p.unindent()
	p.parseMap0(v)
}

// parseMap0 parses "key: value" lines at the current indent level into map v.
// The special value "nil" stores the zero value of the element type.
func (p *parser) parseMap0(v reflect.Value) {
	seen := map[string]struct{}{}
	t := v.Type()
	for p.next() {
		s := p.string()
		s = s[len(p.prefix):]
		l := strings.SplitN(s, ":", 2)
		if len(l) != 2 {
			p.stop("missing key: value")
		}
		k := l[0]
		if k == "" {
			p.stop("empty key")
		}
		if _, ok := seen[k]; ok {
			p.stop("duplicate key")
		}
		seen[k] = struct{}{}
		s = l[1]
		if s != "" && !strings.HasPrefix(s, " ") {
			p.stop("no space after colon")
		}
		if s != "" {
			s = s[1:]
		}

		vv := reflect.New(t.Elem()).Elem()
		if s == "nil" {
			// Special value "nil" means the zero value, no further parsing of a value.
			p.leave("")
		} else {
			p.leave(s)
			vv = p.parseValue(vv)
		}
		v.SetMapIndex(reflect.ValueOf(k), vv)
	}
}
|
71
vendor/github.com/mjl-/sconf/sconf.go
generated
vendored
Normal file
71
vendor/github.com/mjl-/sconf/sconf.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
package sconf
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// ParseFile reads an sconf file from path into dst.
func ParseFile(path string, dst interface{}) error {
	src, err := os.Open(path)
	if err != nil {
		return err
	}
	defer src.Close()
	// path is passed along for use in error messages only.
	return parse(path, src, dst)
}

// Parse reads an sconf file from a reader into dst.
func Parse(src io.Reader, dst interface{}) error {
	return parse("", src, dst)
}
|
||||
|
||||
// Describe writes an example sconf file describing v to w. The file includes all
// fields, values and documentation on the fields as configured with the "sconf"
// and "sconf-doc" struct tags. Describe does not detect recursive values and will
// attempt to write them.
func Describe(w io.Writer, v interface{}) error {
	// keepZero and docs both enabled: a full example with comments.
	return describe(w, v, true, true)
}

// Write writes a valid sconf file describing v to w, without comments, without
// zero values of optional fields. Write does not detect recursive values and
// will attempt to write them.
func Write(w io.Writer, v interface{}) error {
	return describe(w, v, false, false)
}

// WriteDocs is like Write, but does write comments.
func WriteDocs(w io.Writer, v interface{}) error {
	return describe(w, v, false, true)
}
|
||||
|
||||
func describe(w io.Writer, v interface{}, keepZero bool, docs bool) (err error) {
|
||||
value := reflect.ValueOf(v)
|
||||
t := value.Type()
|
||||
if t.Kind() == reflect.Ptr {
|
||||
value = value.Elem()
|
||||
t = value.Type()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("top level object must be a struct, is a %T", v)
|
||||
}
|
||||
defer func() {
|
||||
x := recover()
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
if e, ok := x.(writeError); ok {
|
||||
err = error(e)
|
||||
} else {
|
||||
panic(x)
|
||||
}
|
||||
}()
|
||||
wr := &writer{out: bufio.NewWriter(w), keepZero: keepZero, docs: docs}
|
||||
wr.describeStruct(value)
|
||||
wr.flush()
|
||||
return nil
|
||||
}
|
4
vendor/github.com/mjl-/sherpa/.gitignore
generated
vendored
Normal file
4
vendor/github.com/mjl-/sherpa/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
/cover.out
|
||||
/cover.html
|
||||
|
||||
*\.swp
|
7
vendor/github.com/mjl-/sherpa/LICENSE
generated
vendored
Normal file
7
vendor/github.com/mjl-/sherpa/LICENSE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2016-2018 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
27
vendor/github.com/mjl-/sherpa/LICENSE-go
generated
vendored
Normal file
27
vendor/github.com/mjl-/sherpa/LICENSE-go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
16
vendor/github.com/mjl-/sherpa/Makefile
generated
vendored
Normal file
16
vendor/github.com/mjl-/sherpa/Makefile
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
build:
|
||||
go build ./...
|
||||
go vet ./...
|
||||
|
||||
test:
|
||||
go test -coverprofile=cover.out ./...
|
||||
go tool cover -html=cover.out -o cover.html
|
||||
golint ./...
|
||||
|
||||
coverage:
|
||||
|
||||
clean:
|
||||
go clean ./...
|
||||
|
||||
fmt:
|
||||
go fmt ./...
|
39
vendor/github.com/mjl-/sherpa/README.md
generated
vendored
Normal file
39
vendor/github.com/mjl-/sherpa/README.md
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
# Sherpa
|
||||
|
||||
Sherpa is a Go library for creating a [sherpa API](https://www.ueber.net/who/mjl/sherpa/).
|
||||
|
||||
This library makes it trivial to export Go functions as a sherpa API with an http.Handler.
|
||||
|
||||
Your API will automatically be documented: github.com/mjl-/sherpadoc reads your Go source, and exports function and type comments as API documentation.
|
||||
|
||||
See the [documentation](https://godoc.org/github.com/mjl-/sherpa).
|
||||
|
||||
|
||||
## Examples
|
||||
|
||||
A public sherpa API: https://www.sherpadoc.org/#https://www.sherpadoc.org/example/
|
||||
|
||||
That web application is [sherpaweb](https://github.com/mjl-/sherpaweb). It shows documentation for any sherpa API but also includes an API called Example for demo purposes.
|
||||
|
||||
[Ding](https://github.com/mjl-/ding/) is a more elaborate web application built with this library.
|
||||
|
||||
|
||||
# About
|
||||
|
||||
Written by Mechiel Lukkien, mechiel@ueber.net.
|
||||
Bug fixes, patches, comments are welcome.
|
||||
MIT-licensed, see LICENSE.
|
||||
|
||||
|
||||
# todo
|
||||
|
||||
- add a toggle for enabling calls by GET request. turn off by default for functions with parameters, people might be making requests with sensitive information in query strings...
|
||||
- include a sherpaweb-like page that displays the documentation
|
||||
- consider adding input & output validation and timestamp conversion to plain js lib
|
||||
- consider using interfaces with functions (instead of direct structs) for server implementations. haven't needed it yet, but could be useful for mocking an api that you want to talk to.
|
||||
- think about way to keep unknown fields. perhaps use a json lib that collects unknown keys in a map (which has to be added to the object for which you want to keep such keys).
|
||||
- sherpajs: make a versionied, minified variant, with license line
|
||||
- tool for comparing two jsons for compatibility, listing added sections/functions/types/fields
|
||||
- be more helpful around errors that functions can generate. perhaps adding a mechanism for listing which errors can occur in the api json.
|
||||
- handler: write tests
|
||||
- client: write tests
|
19
vendor/github.com/mjl-/sherpa/codes.go
generated
vendored
Normal file
19
vendor/github.com/mjl-/sherpa/codes.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package sherpa
|
||||
|
||||
// Errors generated by both clients and servers
|
||||
const (
|
||||
SherpaBadFunction = "sherpa:badFunction" // Function does not exist at server.
|
||||
)
|
||||
|
||||
// Errors generated by clients
|
||||
const (
|
||||
SherpaBadResponse = "sherpa:badResponse" // Bad response from server, e.g. JSON response body could not be parsed.
|
||||
SherpaHTTPError = "sherpa:http" // Unexpected http response status code from server.
|
||||
SherpaNoAPI = "sherpa:noAPI" // No API was found at this URL.
|
||||
)
|
||||
|
||||
// Errors generated by servers
|
||||
const (
|
||||
SherpaBadRequest = "sherpa:badRequest" // Error parsing JSON request body.
|
||||
SherpaBadParams = "sherpa:badParams" // Wrong number of parameters in function call.
|
||||
)
|
21
vendor/github.com/mjl-/sherpa/collector.go
generated
vendored
Normal file
21
vendor/github.com/mjl-/sherpa/collector.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package sherpa
|
||||
|
||||
// Collector facilitates collection of metrics. Functions are called by the library as such events or errors occur.
|
||||
// See https://github.com/irias/sherpa-prometheus-collector for an implementation for prometheus.
|
||||
type Collector interface {
|
||||
ProtocolError() // Invalid request at protocol-level, e.g. wrong mimetype or request body.
|
||||
BadFunction() // Function does not exist.
|
||||
JavaScript() // Sherpa.js is requested.
|
||||
JSON() // Sherpa.json is requested.
|
||||
|
||||
// Call of function, how long it took, and in case of failure, the error code.
|
||||
FunctionCall(name string, durationSec float64, errorCode string)
|
||||
}
|
||||
|
||||
type ignoreCollector struct{}
|
||||
|
||||
func (ignoreCollector) ProtocolError() {}
|
||||
func (ignoreCollector) BadFunction() {}
|
||||
func (ignoreCollector) JavaScript() {}
|
||||
func (ignoreCollector) JSON() {}
|
||||
func (ignoreCollector) FunctionCall(name string, durationSec float64, errorCode string) {}
|
8
vendor/github.com/mjl-/sherpa/doc.go
generated
vendored
Normal file
8
vendor/github.com/mjl-/sherpa/doc.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
// Package sherpa exports your Go functions as fully documented sherpa web API's.
|
||||
//
|
||||
// Sherpa is similar to JSON-RPC, but discoverable and self-documenting.
|
||||
// Read more at https://www.ueber.net/who/mjl/sherpa/.
|
||||
//
|
||||
// Use sherpa.NewHandler to export Go functions using a http.Handler.
|
||||
// An example of how to use NewHandler can be found in https://github.com/mjl-/sherpaweb/
|
||||
package sherpa
|
653
vendor/github.com/mjl-/sherpa/handler.go
generated
vendored
Normal file
653
vendor/github.com/mjl-/sherpa/handler.go
generated
vendored
Normal file
@ -0,0 +1,653 @@
|
||||
package sherpa
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/mjl-/sherpadoc"
|
||||
)
|
||||
|
||||
// SherpaVersion is the version of the Sherpa protocol this package implements. Sherpa is at version 1.
|
||||
const SherpaVersion = 1
|
||||
|
||||
// JSON holds all fields for a request to sherpa.json.
|
||||
type JSON struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
Functions []string `json:"functions"`
|
||||
BaseURL string `json:"baseurl"`
|
||||
Version string `json:"version"`
|
||||
SherpaVersion int `json:"sherpaVersion"`
|
||||
SherpadocVersion int `json:"sherpadocVersion"`
|
||||
}
|
||||
|
||||
// HandlerOpts are options for creating a new handler.
|
||||
type HandlerOpts struct {
|
||||
Collector Collector // Holds functions for collecting metrics about function calls and other incoming HTTP requests. May be nil.
|
||||
LaxParameterParsing bool // If enabled, incoming sherpa function calls will ignore unrecognized fields in struct parameters, instead of failing.
|
||||
AdjustFunctionNames string // If empty, only the first character of function names are lower cased. For "lowerWord", the first string of capitals is lowercased, for "none", the function name is left as is.
|
||||
}
|
||||
|
||||
// Raw signals a raw JSON response.
|
||||
// If a handler panics with this type, the raw bytes are sent (with regular
|
||||
// response headers).
|
||||
// Can be used to skip the json encoding from the handler, eg for caching, or
|
||||
// when you read a properly formatted JSON document from a file or database.
|
||||
// By using panic to signal a raw JSON response, the return types stay intact
|
||||
// for sherpadoc to generate documentation from.
|
||||
type Raw []byte
|
||||
|
||||
// handler that responds to all Sherpa-related requests.
|
||||
type handler struct {
|
||||
path string
|
||||
functions map[string]reflect.Value
|
||||
sherpaJSON *JSON
|
||||
opts HandlerOpts
|
||||
}
|
||||
|
||||
// Error returned by a function called through a sherpa API.
|
||||
// Message is a human-readable error message.
|
||||
// Code is optional, it can be used to handle errors programmatically.
|
||||
type Error struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
// InternalServerError is an error that propagates as an HTTP internal server error (HTTP status 500), instead of returning a regular HTTP status 200 OK with the error message in the response body.
|
||||
// Useful for making Sherpa endpoints that can be monitored by simple HTTP monitoring tools.
|
||||
type InternalServerError struct {
|
||||
Code string `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
func (e *InternalServerError) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
func (e *InternalServerError) error() *Error {
|
||||
return &Error{"internalServerError", e.Message}
|
||||
}
|
||||
|
||||
// Sherpa API response type
|
||||
type response struct {
|
||||
Result interface{} `json:"result"`
|
||||
Error *Error `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
var htmlTemplate *template.Template
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
htmlTemplate, err = template.New("html").Parse(`<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<title>{{.title}}</title>
|
||||
<style>
|
||||
body { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; line-height:1.4; font-size:16px; color: #333; }
|
||||
a { color: #327CCB; }
|
||||
.code { padding: 2px 4px; font-size: 90%; color: #c7254e; background-color: #f9f2f4; border-radius: 4px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div style="margin:1em auto 1em; max-width:45em">
|
||||
<h1>{{.title}} <span style="font-weight:normal; font-size:0.7em">- version {{.version}}</span></h1>
|
||||
<p>
|
||||
This is the base URL for {{.title}}. The API has been loaded on this page, under variable <span class="code">{{.id}}</span>. So open your browser's developer console and start calling functions!
|
||||
</p>
|
||||
<p>
|
||||
You can also the <a href="{{.docURL}}">read documentation</a> for this API.</p>
|
||||
</p>
|
||||
<p style="text-align: center; font-size:smaller; margin-top:8ex;">
|
||||
<a href="https://github.com/mjl-/sherpa/">go sherpa code</a> |
|
||||
<a href="https://www.ueber.net/who/mjl/sherpa/">sherpa api's</a> |
|
||||
<a href="https://github.com/mjl-/sherpaweb/">sherpaweb code</a>
|
||||
</p>
|
||||
</div>
|
||||
<script src="{{.jsURL}}"></script>
|
||||
</body>
|
||||
</html>`)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getBaseURL(r *http.Request) string {
|
||||
host := r.Header.Get("X-Forwarded-Host")
|
||||
if host == "" {
|
||||
host = r.Host
|
||||
}
|
||||
scheme := r.Header.Get("X-Forwarded-Proto")
|
||||
if scheme == "" {
|
||||
scheme = "http"
|
||||
}
|
||||
return scheme + "://" + host
|
||||
}
|
||||
|
||||
func respondJSON(w http.ResponseWriter, status int, v interface{}) {
|
||||
respond(w, status, v, false, "")
|
||||
}
|
||||
|
||||
func respond(w http.ResponseWriter, status int, v interface{}, jsonp bool, callback string) {
|
||||
if jsonp {
|
||||
w.Header().Add("Content-Type", "text/javascript; charset=utf-8")
|
||||
} else {
|
||||
w.Header().Add("Content-Type", "application/json; charset=utf-8")
|
||||
}
|
||||
w.WriteHeader(status)
|
||||
var err error
|
||||
if jsonp {
|
||||
_, err = fmt.Fprintf(w, "%s(\n\t", callback)
|
||||
}
|
||||
if raw, ok := v.(Raw); err == nil && ok {
|
||||
_, err = w.Write([]byte(`{"result":`))
|
||||
if err == nil {
|
||||
_, err = w.Write(raw)
|
||||
}
|
||||
if err == nil {
|
||||
_, err = w.Write([]byte("}"))
|
||||
}
|
||||
} else if err == nil && !ok {
|
||||
err = json.NewEncoder(w).Encode(v)
|
||||
}
|
||||
if err == nil && jsonp {
|
||||
_, err = fmt.Fprint(w, ");")
|
||||
}
|
||||
if err != nil && !isConnectionClosed(err) {
|
||||
log.Println("writing response:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Call function fn with a json body read from r.
|
||||
// Ctx is from the http.Request, and is canceled when the http connection goes away.
|
||||
//
|
||||
// on success, the returned interface contains:
|
||||
// - nil, if fn has no return value
|
||||
// - single value, if fn had a single return value
|
||||
// - slice of values, if fn had multiple return values
|
||||
// - Raw, for a preformatted JSON response (caught from panic).
|
||||
//
|
||||
// on error, we always return an Error with the Code field set.
|
||||
func (h *handler) call(ctx context.Context, functionName string, fn reflect.Value, r io.Reader) (ret interface{}, ee error) {
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e == nil {
|
||||
return
|
||||
}
|
||||
|
||||
se, ok := e.(*Error)
|
||||
if ok {
|
||||
ee = se
|
||||
return
|
||||
}
|
||||
ierr, ok := e.(*InternalServerError)
|
||||
if ok {
|
||||
ee = ierr
|
||||
return
|
||||
}
|
||||
if raw, ok := e.(Raw); ok {
|
||||
ret = raw
|
||||
return
|
||||
}
|
||||
panic(e)
|
||||
}()
|
||||
|
||||
lcheck := func(err error, code, message string) {
|
||||
if err != nil {
|
||||
panic(&Error{Code: code, Message: fmt.Sprintf("function %q: %s: %s", functionName, message, err)})
|
||||
}
|
||||
}
|
||||
|
||||
var request struct {
|
||||
Params json.RawMessage `json:"params"`
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(r)
|
||||
dec.DisallowUnknownFields()
|
||||
err := dec.Decode(&request)
|
||||
lcheck(err, SherpaBadRequest, "invalid JSON request body")
|
||||
|
||||
fnt := fn.Type()
|
||||
|
||||
var params []interface{}
|
||||
err = json.Unmarshal(request.Params, ¶ms)
|
||||
lcheck(err, SherpaBadRequest, "invalid JSON request body")
|
||||
|
||||
needArgs := fnt.NumIn()
|
||||
needValues := needArgs
|
||||
ctxType := reflect.TypeOf((*context.Context)(nil)).Elem()
|
||||
needsContext := needValues > 0 && fnt.In(0).Implements(ctxType)
|
||||
if needsContext {
|
||||
needArgs--
|
||||
}
|
||||
if fnt.IsVariadic() {
|
||||
if len(params) != needArgs-1 && len(params) != needArgs {
|
||||
err = fmt.Errorf("got %d, want %d or %d", len(params), needArgs-1, needArgs)
|
||||
}
|
||||
} else {
|
||||
if len(params) != needArgs {
|
||||
err = fmt.Errorf("got %d, want %d", len(params), needArgs)
|
||||
}
|
||||
}
|
||||
lcheck(err, SherpaBadParams, "bad number of parameters")
|
||||
|
||||
values := make([]reflect.Value, needValues)
|
||||
o := 0
|
||||
if needsContext {
|
||||
values[0] = reflect.ValueOf(ctx)
|
||||
o = 1
|
||||
}
|
||||
args := make([]interface{}, needArgs)
|
||||
for i := range args {
|
||||
n := reflect.New(fnt.In(o + i))
|
||||
values[o+i] = n.Elem()
|
||||
args[i] = n.Interface()
|
||||
}
|
||||
|
||||
dec = json.NewDecoder(bytes.NewReader(request.Params))
|
||||
if !h.opts.LaxParameterParsing {
|
||||
dec.DisallowUnknownFields()
|
||||
}
|
||||
err = dec.Decode(&args)
|
||||
lcheck(err, SherpaBadParams, "parsing parameters")
|
||||
|
||||
errorType := reflect.TypeOf((*error)(nil)).Elem()
|
||||
checkError := fnt.NumOut() > 0 && fnt.Out(fnt.NumOut()-1).Implements(errorType)
|
||||
|
||||
var results []reflect.Value
|
||||
if fnt.IsVariadic() {
|
||||
results = fn.CallSlice(values)
|
||||
} else {
|
||||
results = fn.Call(values)
|
||||
}
|
||||
if len(results) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rr := make([]interface{}, len(results))
|
||||
for i, v := range results {
|
||||
rr[i] = v.Interface()
|
||||
}
|
||||
if !checkError {
|
||||
if len(rr) == 1 {
|
||||
return rr[0], nil
|
||||
}
|
||||
return rr, nil
|
||||
}
|
||||
rr, rerr := rr[:len(rr)-1], rr[len(rr)-1]
|
||||
var rv interface{} = rr
|
||||
switch len(rr) {
|
||||
case 0:
|
||||
rv = nil
|
||||
case 1:
|
||||
rv = rr[0]
|
||||
}
|
||||
if rerr == nil {
|
||||
return rv, nil
|
||||
}
|
||||
switch r := rerr.(type) {
|
||||
case *Error:
|
||||
return nil, r
|
||||
case *InternalServerError:
|
||||
return nil, r
|
||||
case error:
|
||||
return nil, &Error{Message: r.Error()}
|
||||
default:
|
||||
panic("checkError while type is not error")
|
||||
}
|
||||
}
|
||||
|
||||
func adjustFunctionNameCapitals(s string, opts HandlerOpts) string {
|
||||
switch opts.AdjustFunctionNames {
|
||||
case "":
|
||||
return strings.ToLower(s[:1]) + s[1:]
|
||||
case "none":
|
||||
return s
|
||||
case "lowerWord":
|
||||
r := ""
|
||||
for i, c := range s {
|
||||
lc := unicode.ToLower(c)
|
||||
if lc == c {
|
||||
r += s[i:]
|
||||
break
|
||||
}
|
||||
r += string(lc)
|
||||
}
|
||||
return r
|
||||
default:
|
||||
panic(fmt.Sprintf("bad value for AdjustFunctionNames: %q", opts.AdjustFunctionNames))
|
||||
}
|
||||
}
|
||||
|
||||
func gatherFunctions(functions map[string]reflect.Value, t reflect.Type, v reflect.Value, opts HandlerOpts) error {
|
||||
if t.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("sherpa sections must be a struct (not a ptr)")
|
||||
}
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
name := adjustFunctionNameCapitals(t.Method(i).Name, opts)
|
||||
m := v.Method(i)
|
||||
if _, ok := functions[name]; ok {
|
||||
return fmt.Errorf("duplicate function %s", name)
|
||||
}
|
||||
functions[name] = m
|
||||
}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
err := gatherFunctions(functions, t.Field(i).Type, v.Field(i), opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewHandler returns a new http.Handler that serves all Sherpa API-related requests.
|
||||
//
|
||||
// Path is the path this API is available at.
|
||||
//
|
||||
// Version should be a semantic version.
|
||||
//
|
||||
// API should by a struct. It represents the root section. All methods of a
|
||||
// section are exported as sherpa functions. All fields must be other sections
|
||||
// (structs) whose methods are also exported. recursively. Method names must
|
||||
// start with an uppercase character to be exported, but their exported names
|
||||
// start with a lowercase character by default (but see HandlerOpts.AdjustFunctionNames).
|
||||
//
|
||||
// Doc is documentation for the top-level sherpa section, as generated by sherpadoc.
|
||||
//
|
||||
// Opts allows further configuration of the handler.
|
||||
//
|
||||
// Methods on the exported sections are exported as Sherpa functions.
|
||||
// If the first parameter of a method is a context.Context, the context from the HTTP request is passed.
|
||||
// This lets you abort work if the HTTP request underlying the function call disappears.
|
||||
//
|
||||
// Parameters and return values for exported functions are automatically converted from/to JSON.
|
||||
// If the last element of a return value (if any) is an error,
|
||||
// that error field is taken to indicate whether the call succeeded.
|
||||
// Exported functions can also panic with an *Error or *InternalServerError to indicate a failed function call.
|
||||
// Returning an error with a Code starting with "server" indicates an implementation error, which will be logged through the collector.
|
||||
//
|
||||
// Variadic functions can be called, but in the call (from the client), the variadic parameters must be passed in as an array.
|
||||
//
|
||||
// This handler strips "path" from the request.
|
||||
func NewHandler(path string, version string, api interface{}, doc *sherpadoc.Section, opts *HandlerOpts) (http.Handler, error) {
|
||||
var xopts HandlerOpts
|
||||
if opts != nil {
|
||||
xopts = *opts
|
||||
}
|
||||
if xopts.Collector == nil {
|
||||
// We always want to have a collector, so we don't have to check for nil all the time when calling.
|
||||
xopts.Collector = ignoreCollector{}
|
||||
}
|
||||
|
||||
doc.Version = version
|
||||
doc.SherpaVersion = SherpaVersion
|
||||
functions := map[string]reflect.Value{
|
||||
"_docs": reflect.ValueOf(func() *sherpadoc.Section {
|
||||
return doc
|
||||
}),
|
||||
}
|
||||
err := gatherFunctions(functions, reflect.TypeOf(api), reflect.ValueOf(api), xopts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
names := make([]string, 0, len(functions))
|
||||
for name := range functions {
|
||||
names = append(names, name)
|
||||
}
|
||||
|
||||
elems := strings.Split(strings.Trim(path, "/"), "/")
|
||||
id := elems[len(elems)-1]
|
||||
sherpaJSON := &JSON{
|
||||
ID: id,
|
||||
Title: doc.Name,
|
||||
Functions: names,
|
||||
BaseURL: "", // filled in during request
|
||||
Version: version,
|
||||
SherpaVersion: SherpaVersion,
|
||||
SherpadocVersion: doc.SherpadocVersion,
|
||||
}
|
||||
h := http.StripPrefix(path, &handler{
|
||||
path: path,
|
||||
functions: functions,
|
||||
sherpaJSON: sherpaJSON,
|
||||
opts: xopts,
|
||||
})
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func badMethod(w http.ResponseWriter) {
|
||||
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
// return whether callback js snippet is valid.
|
||||
// this is a coarse test. we disallow some valid js identifiers, like "\u03c0",
|
||||
// and we allow many invalid ones, such as js keywords, "0intro" and identifiers starting/ending with ".", or having multiple dots.
|
||||
func validCallback(cb string) bool {
|
||||
if cb == "" {
|
||||
return false
|
||||
}
|
||||
for _, c := range cb {
|
||||
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '_' || c == '$' || c == '.' {
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Serve a HTTP request for this Sherpa API.
|
||||
// ServeHTTP expects the request path is stripped from the path it was mounted at with the http package.
|
||||
//
|
||||
// The following endpoints are handled:
|
||||
// - sherpa.json, describing this API.
|
||||
// - sherpa.js, a small stand-alone client JavaScript library that makes it trivial to start using this API from a browser.
|
||||
// - functionName, for function invocations on this API.
|
||||
//
|
||||
// HTTP response will have CORS-headers set, and support the OPTIONS HTTP method.
|
||||
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
defer r.Body.Close()
|
||||
hdr := w.Header()
|
||||
hdr.Set("Access-Control-Allow-Origin", "*")
|
||||
hdr.Set("Access-Control-Allow-Methods", "GET, POST")
|
||||
hdr.Set("Access-Control-Allow-Headers", "Content-Type")
|
||||
|
||||
collector := h.opts.Collector
|
||||
|
||||
switch {
|
||||
case r.URL.Path == "":
|
||||
baseURL := getBaseURL(r) + h.path
|
||||
docURL := "https://www.sherpadoc.org/#" + baseURL
|
||||
err := htmlTemplate.Execute(w, map[string]interface{}{
|
||||
"id": h.sherpaJSON.ID,
|
||||
"title": h.sherpaJSON.Title,
|
||||
"version": h.sherpaJSON.Version,
|
||||
"docURL": docURL,
|
||||
"jsURL": baseURL + "sherpa.js",
|
||||
})
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
case r.URL.Path == "sherpa.json":
|
||||
switch r.Method {
|
||||
case "OPTIONS":
|
||||
w.WriteHeader(204)
|
||||
case "GET":
|
||||
collector.JSON()
|
||||
hdr.Set("Content-Type", "application/json; charset=utf-8")
|
||||
hdr.Set("Cache-Control", "no-cache")
|
||||
sherpaJSON := &*h.sherpaJSON
|
||||
sherpaJSON.BaseURL = getBaseURL(r) + h.path
|
||||
err := json.NewEncoder(w).Encode(sherpaJSON)
|
||||
if err != nil {
|
||||
log.Println("writing sherpa.json response:", err)
|
||||
}
|
||||
default:
|
||||
badMethod(w)
|
||||
}
|
||||
|
||||
case r.URL.Path == "sherpa.js":
|
||||
if r.Method != "GET" {
|
||||
badMethod(w)
|
||||
return
|
||||
}
|
||||
collector.JavaScript()
|
||||
hdr.Set("Content-Type", "text/javascript; charset=utf-8")
|
||||
hdr.Set("Cache-Control", "no-cache")
|
||||
sherpaJSON := &*h.sherpaJSON
|
||||
sherpaJSON.BaseURL = getBaseURL(r) + h.path
|
||||
buf, err := json.Marshal(sherpaJSON)
|
||||
js := strings.Replace(sherpaJS, "{{.sherpaJSON}}", string(buf), -1)
|
||||
_, err = w.Write([]byte(js))
|
||||
if err != nil {
|
||||
log.Println("writing sherpa.js response:", err)
|
||||
}
|
||||
|
||||
default:
|
||||
name := r.URL.Path
|
||||
fn, ok := h.functions[name]
|
||||
switch r.Method {
|
||||
case "OPTIONS":
|
||||
w.WriteHeader(204)
|
||||
|
||||
case "POST":
|
||||
hdr.Set("Cache-Control", "no-store")
|
||||
|
||||
if !ok {
|
||||
collector.BadFunction()
|
||||
respondJSON(w, 404, &response{Error: &Error{Code: SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", name)}})
|
||||
return
|
||||
}
|
||||
|
||||
ct := r.Header.Get("Content-Type")
|
||||
if ct == "" {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("missing content-type")}})
|
||||
return
|
||||
}
|
||||
mt, mtparams, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("invalid content-type %q", ct)}})
|
||||
return
|
||||
}
|
||||
if mt != "application/json" {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unrecognized content-type %q, expecting "application/json"`, mt)}})
|
||||
return
|
||||
}
|
||||
charset, ok := mtparams["charset"]
|
||||
if ok && strings.ToLower(charset) != "utf-8" {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unexpected charset %q, expecting "utf-8"`, charset)}})
|
||||
return
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
r, xerr := h.call(r.Context(), name, fn, r.Body)
|
||||
durationSec := float64(time.Now().Sub(t0)) / float64(time.Second)
|
||||
if xerr != nil {
|
||||
switch err := xerr.(type) {
|
||||
case *InternalServerError:
|
||||
collector.FunctionCall(name, durationSec, err.Code)
|
||||
respondJSON(w, 500, &response{Error: err.error()})
|
||||
case *Error:
|
||||
collector.FunctionCall(name, durationSec, err.Code)
|
||||
respondJSON(w, 200, &response{Error: err})
|
||||
default:
|
||||
collector.FunctionCall(name, durationSec, "server:panic")
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
var v interface{}
|
||||
if raw, ok := r.(Raw); ok {
|
||||
v = raw
|
||||
} else {
|
||||
v = &response{Result: r}
|
||||
}
|
||||
collector.FunctionCall(name, durationSec, "")
|
||||
respondJSON(w, 200, v)
|
||||
}
|
||||
|
||||
case "GET":
|
||||
hdr.Set("Cache-Control", "no-store")
|
||||
|
||||
jsonp := false
|
||||
if !ok {
|
||||
collector.BadFunction()
|
||||
respondJSON(w, 404, &response{Error: &Error{Code: SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", name)}})
|
||||
return
|
||||
}
|
||||
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("could not parse query string")}})
|
||||
return
|
||||
}
|
||||
|
||||
callback := r.Form.Get("callback")
|
||||
_, ok := r.Form["callback"]
|
||||
if ok {
|
||||
if !validCallback(callback) {
|
||||
collector.ProtocolError()
|
||||
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`invalid callback name %q`, callback)}})
|
||||
return
|
||||
}
|
||||
jsonp = true
|
||||
}
|
||||
|
||||
// We allow an empty list to be missing to make it cleaner & easier to call health check functions (no ugly urls).
|
||||
body := r.Form.Get("body")
|
||||
_, ok = r.Form["body"]
|
||||
if !ok {
|
||||
body = `{"params": []}`
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
r, xerr := h.call(r.Context(), name, fn, strings.NewReader(body))
|
||||
durationSec := float64(time.Now().Sub(t0)) / float64(time.Second)
|
||||
if xerr != nil {
|
||||
switch err := xerr.(type) {
|
||||
case *InternalServerError:
|
||||
collector.FunctionCall(name, durationSec, err.Code)
|
||||
respond(w, 500, &response{Error: err.error()}, jsonp, callback)
|
||||
case *Error:
|
||||
collector.FunctionCall(name, durationSec, err.Code)
|
||||
respond(w, 200, &response{Error: err}, jsonp, callback)
|
||||
default:
|
||||
collector.FunctionCall(name, durationSec, "server:panic")
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
var v interface{}
|
||||
if raw, ok := r.(Raw); ok {
|
||||
v = raw
|
||||
} else {
|
||||
v = &response{Result: r}
|
||||
}
|
||||
collector.FunctionCall(name, durationSec, "")
|
||||
respond(w, 200, v, jsonp, callback)
|
||||
}
|
||||
|
||||
default:
|
||||
badMethod(w)
|
||||
}
|
||||
}
|
||||
}
|
87
vendor/github.com/mjl-/sherpa/intstr.go
generated
vendored
Normal file
87
vendor/github.com/mjl-/sherpa/intstr.go
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package sherpa
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Int64s is an int64 that can be read as either a JSON string or JSON number, to
|
||||
// be used in sherpa function parameters for compatibility with JavaScript.
|
||||
// For struct fields, use the "json:,string" struct tag instead.
|
||||
type Int64s int64
|
||||
|
||||
// Int returns the int64 value.
|
||||
func (i Int64s) Int() int64 {
|
||||
return int64(i)
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON-string-encoding of the int64.
|
||||
func (i *Int64s) MarshalJSON() ([]byte, error) {
|
||||
var v int64
|
||||
if i != nil {
|
||||
v = int64(*i)
|
||||
}
|
||||
return json.Marshal(fmt.Sprintf("%d", v))
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses JSON into the int64. Both a string encoding as a number
|
||||
// encoding are allowed. JavaScript clients must use the string encoding because
|
||||
// the number encoding loses precision at 1<<53.
|
||||
func (i *Int64s) UnmarshalJSON(buf []byte) error {
|
||||
var s string
|
||||
if len(buf) > 0 && buf[0] == '"' {
|
||||
err := json.Unmarshal(buf, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
s = string(buf)
|
||||
}
|
||||
vv, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = Int64s(vv)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Uint64s is an uint64 that can be read as either a JSON string or JSON number, to
|
||||
// be used in sherpa function parameters for compatibility with JavaScript.
|
||||
// For struct fields, use the "json:,string" struct tag instead.
|
||||
type Uint64s uint64
|
||||
|
||||
// Int returns the uint64 value.
|
||||
func (i Uint64s) Int() uint64 {
|
||||
return uint64(i)
|
||||
}
|
||||
|
||||
// MarshalJSON returns a JSON-string-encoding of the uint64.
|
||||
func (i *Uint64s) MarshalJSON() ([]byte, error) {
|
||||
var v uint64
|
||||
if i != nil {
|
||||
v = uint64(*i)
|
||||
}
|
||||
return json.Marshal(fmt.Sprintf("%d", v))
|
||||
}
|
||||
|
||||
// UnmarshalJSON parses JSON into the uint64. Both a string encoding as a number
|
||||
// encoding are allowed. JavaScript clients must use the string encoding because
|
||||
// the number encoding loses precision at 1<<53.
|
||||
func (i *Uint64s) UnmarshalJSON(buf []byte) error {
|
||||
var s string
|
||||
if len(buf) > 0 && buf[0] == '"' {
|
||||
err := json.Unmarshal(buf, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
s = string(buf)
|
||||
}
|
||||
vv, err := strconv.ParseInt(s, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = Uint64s(vv)
|
||||
return nil
|
||||
}
|
13
vendor/github.com/mjl-/sherpa/isclosed.go
generated
vendored
Normal file
13
vendor/github.com/mjl-/sherpa/isclosed.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package sherpa
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func isConnectionClosed(err error) bool {
|
||||
return errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET)
|
||||
}
|
6
vendor/github.com/mjl-/sherpa/isclosed_plan9.go
generated
vendored
Normal file
6
vendor/github.com/mjl-/sherpa/isclosed_plan9.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
package sherpa
|
||||
|
||||
func isConnectionClosed(err error) bool {
|
||||
// todo: needs a better test
|
||||
return false
|
||||
}
|
136
vendor/github.com/mjl-/sherpa/sherpajs.go
generated
vendored
Normal file
136
vendor/github.com/mjl-/sherpa/sherpajs.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
package sherpa
|
||||
|
||||
var sherpaJS = `
|
||||
'use strict';
|
||||
|
||||
(function(undefined) {
|
||||
|
||||
var sherpa = {};
|
||||
|
||||
// prepare basic support for promises.
|
||||
// we return functions with a "then" method only. our "then" isn't chainable. and you don't get other promise-related methods.
|
||||
// but this "then" is enough so your browser's promise library (or a polyfill) can turn it into a real promise.
|
||||
function thenable(fn) {
|
||||
var settled = false;
|
||||
var fulfilled = false;
|
||||
var result = null;
|
||||
|
||||
var goods = [];
|
||||
var bads = [];
|
||||
|
||||
// promise lib will call the returned function, make it the same as our .then function
|
||||
var nfn = function(goodfn, badfn) {
|
||||
if(settled) {
|
||||
if(fulfilled && goodfn) {
|
||||
goodfn(result);
|
||||
}
|
||||
if(!fulfilled && badfn) {
|
||||
badfn(result);
|
||||
}
|
||||
} else {
|
||||
if(goodfn) {
|
||||
goods.push(goodfn);
|
||||
}
|
||||
if(badfn) {
|
||||
bads.push(badfn);
|
||||
}
|
||||
}
|
||||
};
|
||||
nfn.then = nfn;
|
||||
|
||||
function done() {
|
||||
while(fulfilled && goods.length > 0) {
|
||||
goods.shift()(result);
|
||||
}
|
||||
while(!fulfilled && bads.length > 0) {
|
||||
bads.shift()(result);
|
||||
}
|
||||
}
|
||||
|
||||
function makeSettle(xfulfilled) {
|
||||
return function(arg) {
|
||||
if(settled) {
|
||||
return;
|
||||
}
|
||||
settled = true;
|
||||
fulfilled = xfulfilled;
|
||||
result = arg;
|
||||
done();
|
||||
};
|
||||
}
|
||||
var resolve = makeSettle(true);
|
||||
var reject = makeSettle(false);
|
||||
try {
|
||||
fn(resolve, reject);
|
||||
} catch(e) {
|
||||
reject(e);
|
||||
}
|
||||
return nfn;
|
||||
}
|
||||
|
||||
function postJSON(url, param, success, error) {
|
||||
var req = new window.XMLHttpRequest();
|
||||
req.open('POST', url, true);
|
||||
req.onload = function onload() {
|
||||
if(req.status >= 200 && req.status < 400) {
|
||||
success(JSON.parse(req.responseText));
|
||||
} else {
|
||||
if(req.status === 404) {
|
||||
error({code: 'sherpaBadFunction', message: 'function does not exist'});
|
||||
} else {
|
||||
error({code: 'sherpaHttpError', message: 'error calling function, HTTP status: '+req.status});
|
||||
}
|
||||
}
|
||||
};
|
||||
req.onerror = function onerror() {
|
||||
error({code: 'sherpaClientError', message: 'connection failed'});
|
||||
};
|
||||
req.setRequestHeader('Content-Type', 'application/json');
|
||||
req.send(JSON.stringify(param));
|
||||
}
|
||||
|
||||
function makeFunction(api, name) {
|
||||
return function() {
|
||||
var params = Array.prototype.slice.call(arguments, 0);
|
||||
return api._wrapThenable(thenable(function(resolve, reject) {
|
||||
postJSON(api._sherpa.baseurl+name, {params: params}, function(response) {
|
||||
if(response && response.error) {
|
||||
reject(response.error);
|
||||
} else if(response && response.hasOwnProperty('result')) {
|
||||
resolve(response.result);
|
||||
} else {
|
||||
reject({code: 'sherpaBadResponse', message: "invalid sherpa response object, missing 'result'"});
|
||||
}
|
||||
}, reject);
|
||||
}));
|
||||
};
|
||||
}
|
||||
|
||||
sherpa.init = function init(_sherpa) {
|
||||
var api = {};
|
||||
|
||||
function _wrapThenable(thenable) {
|
||||
return thenable;
|
||||
}
|
||||
|
||||
function _call(name) {
|
||||
return makeFunction(api, name).apply(Array.prototype.slice.call(arguments, 1));
|
||||
}
|
||||
|
||||
api._sherpa = _sherpa;
|
||||
api._wrapThenable = _wrapThenable;
|
||||
api._call = _call;
|
||||
for(var i = 0; i < _sherpa.functions.length; i++) {
|
||||
var fn = _sherpa.functions[i];
|
||||
api[fn] = makeFunction(api, fn);
|
||||
}
|
||||
|
||||
return api;
|
||||
};
|
||||
|
||||
|
||||
var _sherpa = {{.sherpaJSON}};
|
||||
window[_sherpa.id] = sherpa.init(_sherpa);
|
||||
|
||||
})();
|
||||
`
|
7
vendor/github.com/mjl-/sherpadoc/LICENSE
generated
vendored
Normal file
7
vendor/github.com/mjl-/sherpadoc/LICENSE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2016-2019 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
28
vendor/github.com/mjl-/sherpadoc/README.txt
generated
vendored
Normal file
28
vendor/github.com/mjl-/sherpadoc/README.txt
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
sherpadoc - documentation for sherpa API's
|
||||
|
||||
Go package containing type defintions for sherpa documentation for encoding to and decoding from json.
|
||||
Also contains the sherpadoc command reads Go code and writes sherpadoc JSON.
|
||||
|
||||
Use together with the sherpa library, github.com/mjl-/sherpa.
|
||||
Read more about sherpa at https://www.ueber.net/who/mjl/sherpa/
|
||||
|
||||
# About
|
||||
|
||||
Written by Mechiel Lukkien, mechiel@ueber.net.
|
||||
Bug fixes, patches, comments are welcome.
|
||||
MIT-licensed, see LICENSE.
|
||||
|
||||
# todo
|
||||
|
||||
- major cleanup required. too much parsing is done that can probably be handled by the go/* packages.
|
||||
- check that all cases of embedding work
|
||||
- check that all cross-package referencing (ast.SelectorExpr) works
|
||||
- better cli syntax for replacements, and always replace based on fully qualified names. currently you need to specify both the fully qualified and unqualified type paths.
|
||||
- see if order of items in output depends on a map somewhere, i've seen diffs for generated jsons where a type was only moved, not modified.
|
||||
- better error messages and error handling, stricter parsing
|
||||
- support type aliases
|
||||
- support plain iota enums? currently only simple literals are supported for enums.
|
||||
- support complete expressions for enum consts?
|
||||
- find out which go constructs people want to use that aren't yet implemented by sherpadoc
|
||||
- when to make a field nullable. when omitempty is set? (currently yes), when field is a pointer type (currently yes). should we have a way to prevent nullable without omitempty set, or make field a pointer without it being nullable?
|
||||
- write tests
|
166
vendor/github.com/mjl-/sherpadoc/check.go
generated
vendored
Normal file
166
vendor/github.com/mjl-/sherpadoc/check.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
package sherpadoc
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type genError struct{ error }
|
||||
|
||||
func parseError(path string, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
err := fmt.Errorf("invalid sherpadoc at %s: %s", path, msg)
|
||||
panic(genError{err})
|
||||
}
|
||||
|
||||
func makePath(path string, field string, index int, name string) string {
|
||||
return fmt.Sprintf("%s.%s[%d (%q)]", path, field, index, name)
|
||||
}
|
||||
|
||||
// NOTE: sherpaweb/ts/parse.ts and sherpadoc/check.go contain the same checking.
|
||||
// The code is very similar. Best keep it in sync and modify the implementations in tandem.
|
||||
type checker struct {
|
||||
types map[string]struct{}
|
||||
functions map[string]struct{}
|
||||
}
|
||||
|
||||
func (c checker) markIdent(path, ident string) {
|
||||
if _, ok := c.types[ident]; ok {
|
||||
parseError(path, "duplicate type %q", ident)
|
||||
}
|
||||
c.types[ident] = struct{}{}
|
||||
}
|
||||
|
||||
func (c checker) walkTypeNames(path string, sec *Section) {
|
||||
for i, t := range sec.Structs {
|
||||
c.markIdent(makePath(path, "Structs", i, t.Name), t.Name)
|
||||
}
|
||||
for i, t := range sec.Ints {
|
||||
npath := makePath(path, "Ints", i, t.Name)
|
||||
c.markIdent(npath, t.Name)
|
||||
for j, v := range t.Values {
|
||||
c.markIdent(makePath(npath, "Values", j, v.Name), v.Name)
|
||||
}
|
||||
}
|
||||
for i, t := range sec.Strings {
|
||||
npath := makePath(path, "Strings", i, t.Name)
|
||||
c.markIdent(npath, t.Name)
|
||||
for j, v := range t.Values {
|
||||
c.markIdent(makePath(npath, "Values", j, v.Name), v.Name)
|
||||
}
|
||||
}
|
||||
for i, subsec := range sec.Sections {
|
||||
c.walkTypeNames(makePath(path, "Sections", i, subsec.Name), subsec)
|
||||
}
|
||||
}
|
||||
|
||||
func (c checker) walkFunctionNames(path string, sec *Section) {
|
||||
for i, fn := range sec.Functions {
|
||||
npath := makePath(path, "Functions", i, fn.Name)
|
||||
if _, ok := c.functions[fn.Name]; ok {
|
||||
parseError(npath, "duplicate function %q", fn.Name)
|
||||
}
|
||||
c.functions[fn.Name] = struct{}{}
|
||||
|
||||
paramNames := map[string]struct{}{}
|
||||
for i, arg := range fn.Params {
|
||||
if _, ok := paramNames[arg.Name]; ok {
|
||||
parseError(makePath(npath, "Params", i, arg.Name), "duplicate parameter name")
|
||||
}
|
||||
paramNames[arg.Name] = struct{}{}
|
||||
}
|
||||
|
||||
returnNames := map[string]struct{}{}
|
||||
for i, arg := range fn.Returns {
|
||||
if _, ok := returnNames[arg.Name]; ok {
|
||||
parseError(makePath(npath, "Returns", i, arg.Name), "duplicate return name")
|
||||
}
|
||||
returnNames[arg.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
for i, subsec := range sec.Sections {
|
||||
c.walkFunctionNames(makePath(path, "Sections", i, subsec.Name), subsec)
|
||||
}
|
||||
}
|
||||
|
||||
func (c checker) checkTypewords(path string, tokens []string, okNullable bool) {
|
||||
if len(tokens) == 0 {
|
||||
parseError(path, "unexpected end of typewords")
|
||||
}
|
||||
t := tokens[0]
|
||||
tokens = tokens[1:]
|
||||
switch t {
|
||||
case "nullable":
|
||||
if !okNullable {
|
||||
parseError(path, "repeated nullable in typewords")
|
||||
}
|
||||
if len(tokens) == 0 {
|
||||
parseError(path, "missing typeword after %#v", t)
|
||||
}
|
||||
c.checkTypewords(path, tokens, false)
|
||||
case "any", "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "int64s", "uint64s", "float32", "float64", "string", "timestamp":
|
||||
if len(tokens) != 0 {
|
||||
parseError(path, "leftover typewords %v", tokens)
|
||||
}
|
||||
case "[]", "{}":
|
||||
if len(tokens) == 0 {
|
||||
parseError(path, "missing typeword after %#v", t)
|
||||
}
|
||||
c.checkTypewords(path, tokens, true)
|
||||
default:
|
||||
_, ok := c.types[t]
|
||||
if !ok {
|
||||
parseError(path, "referenced type %q does not exist", t)
|
||||
}
|
||||
if len(tokens) != 0 {
|
||||
parseError(path, "leftover typewords %v", tokens)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c checker) walkTypewords(path string, sec *Section) {
|
||||
for i, t := range sec.Structs {
|
||||
npath := makePath(path, "Structs", i, t.Name)
|
||||
for j, f := range t.Fields {
|
||||
c.checkTypewords(makePath(npath, "Fields", j, f.Name), f.Typewords, true)
|
||||
}
|
||||
}
|
||||
for i, fn := range sec.Functions {
|
||||
npath := makePath(path, "Functions", i, fn.Name)
|
||||
for j, arg := range fn.Params {
|
||||
c.checkTypewords(makePath(npath, "Params", j, arg.Name), arg.Typewords, true)
|
||||
}
|
||||
for j, arg := range fn.Returns {
|
||||
c.checkTypewords(makePath(npath, "Returns", j, arg.Name), arg.Typewords, true)
|
||||
}
|
||||
}
|
||||
for i, subsec := range sec.Sections {
|
||||
c.walkTypewords(makePath(path, "Sections", i, subsec.Name), subsec)
|
||||
}
|
||||
}
|
||||
|
||||
// Check walks the sherpa section and checks it for correctness. It checks for:
|
||||
//
|
||||
// - Duplicate type names.
|
||||
// - Duplicate parameter or return names.
|
||||
// - References to types that are not defined.
|
||||
// - Validity of typewords.
|
||||
func Check(doc *Section) (retErr error) {
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
g, ok := e.(genError)
|
||||
if !ok {
|
||||
panic(e)
|
||||
}
|
||||
retErr = error(g)
|
||||
}
|
||||
}()
|
||||
|
||||
c := checker{map[string]struct{}{}, map[string]struct{}{}}
|
||||
|
||||
c.walkTypeNames("", doc)
|
||||
c.walkFunctionNames("", doc)
|
||||
c.walkTypewords("", doc)
|
||||
|
||||
return nil
|
||||
}
|
270
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go
generated
vendored
Normal file
270
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go
generated
vendored
Normal file
@ -0,0 +1,270 @@
|
||||
/*
|
||||
Sherpadoc parses Go code and outputs sherpa documentation in JSON.
|
||||
|
||||
This documentation is provided to the sherpa HTTP handler to serve
|
||||
as documentation through the _docs function.
|
||||
|
||||
Example:
|
||||
|
||||
sherpadoc Awesome >awesome.json
|
||||
|
||||
Sherpadoc parses Go code, finds a struct named "Awesome", and gathers
|
||||
documentation:
|
||||
|
||||
Comments above the struct are used as section documentation. Fields
|
||||
in section structs must are treated as subsections, and can in turn
|
||||
contain subsections. These subsections and their methods are also
|
||||
exported and documented in the sherpa API. Add a struct tag "sherpa"
|
||||
to override the name of the subsection, for example `sherpa:"Another
|
||||
Awesome API"`.
|
||||
|
||||
Comments above method names are function documentation. A synopsis
|
||||
is automatically generated.
|
||||
|
||||
Types used as parameters or return values are added to the section
|
||||
documentation where they are used. The comments above the type are
|
||||
used, as well as the comments for each field in a struct. The
|
||||
documented field names know about the "json" struct field tags.
|
||||
|
||||
More eloborate example:
|
||||
|
||||
sherpadoc
|
||||
-title 'Awesome API by mjl' \
|
||||
-replace 'pkg.Type string,example.com/some/pkg.SomeType [] string' \
|
||||
path/to/awesome/code Awesome \
|
||||
>awesome.json
|
||||
|
||||
Most common Go code patterns for API functions have been implemented
|
||||
in sherpadoc, but you may run into missing support.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/sherpadoc"
|
||||
|
||||
"golang.org/x/mod/modfile"
|
||||
)
|
||||
|
||||
var (
|
||||
packagePath = flag.String("package-path", ".", "of source code to parse")
|
||||
replace = flag.String("replace", "", "comma-separated list of type replacements, e.g. \"somepkg.SomeType string\"")
|
||||
title = flag.String("title", "", "title of the API, default is the name of the type of the main API")
|
||||
adjustFunctionNames = flag.String("adjust-function-names", "", `by default, the first character of function names is turned into lower case; with "lowerWord" the first string of upper case characters is lower cased, with "none" the name is left as is`)
|
||||
)
|
||||
|
||||
// If there is a "vendor" directory, we'll load packages from there (instead of
|
||||
// through (slower) packages.Load), and we need to know the module name to resolve
|
||||
// imports to paths in vendor.
|
||||
var (
|
||||
gomodFile *modfile.File
|
||||
gomodDir string
|
||||
)
|
||||
|
||||
type field struct {
|
||||
Name string
|
||||
Typewords []string
|
||||
Doc string
|
||||
Fields []*field
|
||||
}
|
||||
|
||||
func (f field) TypeString() string {
|
||||
t := []string{}
|
||||
for _, e := range f.Typewords {
|
||||
if e == "nullable" {
|
||||
e = "*"
|
||||
}
|
||||
t = append(t, e)
|
||||
}
|
||||
return strings.Join(t, "")
|
||||
}
|
||||
|
||||
type typeKind int
|
||||
|
||||
const (
|
||||
typeStruct typeKind = iota
|
||||
typeInts
|
||||
typeStrings
|
||||
typeBytes
|
||||
)
|
||||
|
||||
// NamedType represents the type of a parameter or return value.
|
||||
type namedType struct {
|
||||
Name string
|
||||
Text string
|
||||
Kind typeKind
|
||||
Fields []*field // For kind is typeStruct.
|
||||
// For kind is typeInts
|
||||
IntValues []struct {
|
||||
Name string
|
||||
Value int
|
||||
Docs string
|
||||
}
|
||||
// For kind is typeStrings
|
||||
StringValues []struct {
|
||||
Name string
|
||||
Value string
|
||||
Docs string
|
||||
}
|
||||
}
|
||||
|
||||
type function struct {
|
||||
Name string
|
||||
Text string
|
||||
Params []sherpadoc.Arg
|
||||
Returns []sherpadoc.Arg
|
||||
}
|
||||
|
||||
// Section is an API section with docs, functions and subsections.
|
||||
// Types are gathered per section, and moved up the section tree to the first common ancestor, so types are only documented once.
|
||||
type section struct {
|
||||
TypeName string // Name of the type for this section.
|
||||
Name string // Name of the section. Either same as TypeName, or overridden with a "sherpa" struct tag.
|
||||
Text string
|
||||
Types []*namedType
|
||||
Typeset map[string]struct{}
|
||||
Functions []*function
|
||||
Sections []*section
|
||||
}
|
||||
|
||||
func check(err error, action string) {
|
||||
if err != nil {
|
||||
log.Fatalf("%s: %s", action, err)
|
||||
}
|
||||
}
|
||||
|
||||
func usage() {
|
||||
log.Println("usage: sherpadoc [flags] section")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.SetFlags(0)
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
usage()
|
||||
}
|
||||
|
||||
// If vendor exists, we load packages from it.
|
||||
for dir, _ := os.Getwd(); dir != "" && dir != "/"; dir = filepath.Dir(dir) {
|
||||
p := filepath.Join(dir, "go.mod")
|
||||
if _, err := os.Stat(p); err != nil && os.IsNotExist(err) {
|
||||
continue
|
||||
} else if err != nil {
|
||||
log.Printf("searching for go.mod: %v", err)
|
||||
break
|
||||
}
|
||||
|
||||
if _, err := os.Stat(filepath.Join(dir, "vendor")); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if gomod, err := os.ReadFile(p); err != nil {
|
||||
log.Fatalf("reading go.mod: %s", err)
|
||||
} else if mf, err := modfile.ParseLax("go.mod", gomod, nil); err != nil {
|
||||
log.Fatalf("parsing go.mod: %s", err)
|
||||
} else {
|
||||
gomodFile = mf
|
||||
gomodDir = dir
|
||||
}
|
||||
}
|
||||
|
||||
section := parseDoc(args[0], *packagePath)
|
||||
if *title != "" {
|
||||
section.Name = *title
|
||||
}
|
||||
|
||||
moveTypesUp(section)
|
||||
|
||||
doc := sherpaSection(section)
|
||||
doc.SherpaVersion = 0
|
||||
doc.SherpadocVersion = sherpadoc.SherpadocVersion
|
||||
|
||||
err := sherpadoc.Check(doc)
|
||||
check(err, "checking sherpadoc output before writing")
|
||||
|
||||
writeJSON(doc)
|
||||
}
|
||||
|
||||
func writeJSON(v interface{}) {
|
||||
buf, err := json.MarshalIndent(v, "", "\t")
|
||||
check(err, "marshal to json")
|
||||
_, err = os.Stdout.Write(buf)
|
||||
check(err, "writing json to stdout")
|
||||
_, err = fmt.Println()
|
||||
check(err, "write to stdout")
|
||||
}
|
||||
|
||||
type typeCount struct {
|
||||
t *namedType
|
||||
count int
|
||||
}
|
||||
|
||||
// Move types used in multiple sections up to their common ancestor.
|
||||
func moveTypesUp(sec *section) {
|
||||
// First, the process for each child.
|
||||
for _, s := range sec.Sections {
|
||||
moveTypesUp(s)
|
||||
}
|
||||
|
||||
// Count how often a type is used from here downwards.
|
||||
// If more than once, move the type up to here.
|
||||
counts := map[string]*typeCount{}
|
||||
countTypes(counts, sec)
|
||||
for _, tc := range counts {
|
||||
if tc.count <= 1 {
|
||||
continue
|
||||
}
|
||||
for _, sub := range sec.Sections {
|
||||
removeType(sub, tc.t)
|
||||
}
|
||||
if !hasType(sec, tc.t) {
|
||||
sec.Types = append(sec.Types, tc.t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func countTypes(counts map[string]*typeCount, sec *section) {
|
||||
for _, t := range sec.Types {
|
||||
_, ok := counts[t.Name]
|
||||
if !ok {
|
||||
counts[t.Name] = &typeCount{t, 0}
|
||||
}
|
||||
counts[t.Name].count++
|
||||
}
|
||||
for _, subsec := range sec.Sections {
|
||||
countTypes(counts, subsec)
|
||||
}
|
||||
}
|
||||
|
||||
func removeType(sec *section, t *namedType) {
|
||||
types := make([]*namedType, 0, len(sec.Types))
|
||||
for _, tt := range sec.Types {
|
||||
if tt.Name != t.Name {
|
||||
types = append(types, tt)
|
||||
}
|
||||
}
|
||||
sec.Types = types
|
||||
for _, sub := range sec.Sections {
|
||||
removeType(sub, t)
|
||||
}
|
||||
}
|
||||
|
||||
func hasType(sec *section, t *namedType) bool {
|
||||
for _, tt := range sec.Types {
|
||||
if tt.Name == t.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
857
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go
generated
vendored
Normal file
857
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go
generated
vendored
Normal file
@ -0,0 +1,857 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/doc"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/go/packages"
|
||||
|
||||
"github.com/mjl-/sherpadoc"
|
||||
)
|
||||
|
||||
// ParsedPackage possibly includes some of its imports because the package that contains the section references it.
|
||||
type parsedPackage struct {
|
||||
Fset *token.FileSet // Used with a token.Pos to get offending locations.
|
||||
Path string // Of import, used for keeping duplicate type names from different packages unique.
|
||||
Pkg *ast.Package // Needed for its files: we need a file to find the package path and identifier used to reference other types.
|
||||
Docpkg *doc.Package
|
||||
Imports map[string]*parsedPackage // Package/import path to parsed packages.
|
||||
}
|
||||
|
||||
type typewords []string
|
||||
|
||||
func (pp *parsedPackage) lookupType(name string) *doc.Type {
|
||||
for _, t := range pp.Docpkg.Types {
|
||||
if t.Name == name {
|
||||
return t
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like log.Fatalf, but prefixes error message with offending file position (if known).
|
||||
// pp is the package where the position tok belongs to.
|
||||
func logFatalLinef(pp *parsedPackage, tok token.Pos, format string, args ...interface{}) {
|
||||
if !tok.IsValid() {
|
||||
log.Fatalf(format, args...)
|
||||
}
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
log.Fatalf("%s: %s", pp.Fset.Position(tok).String(), msg)
|
||||
}
|
||||
|
||||
// Documentation for a single field, with text above the field, and
|
||||
// on the right of the field combined.
|
||||
func fieldDoc(f *ast.Field) string {
|
||||
s := ""
|
||||
if f.Doc != nil {
|
||||
s += strings.Replace(strings.TrimSpace(f.Doc.Text()), "\n", " ", -1)
|
||||
}
|
||||
if f.Comment != nil {
|
||||
if s != "" {
|
||||
s += "; "
|
||||
}
|
||||
s += strings.TrimSpace(f.Comment.Text())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse string literal. Errors are fatal.
|
||||
func parseStringLiteral(s string) string {
|
||||
r, err := strconv.Unquote(s)
|
||||
check(err, "parsing string literal")
|
||||
return r
|
||||
}
|
||||
|
||||
func jsonName(tag string, name string) string {
|
||||
s := reflect.StructTag(tag).Get("json")
|
||||
if s == "" || strings.HasPrefix(s, ",") {
|
||||
return name
|
||||
} else if s == "-" {
|
||||
return ""
|
||||
} else {
|
||||
return strings.Split(s, ",")[0]
|
||||
}
|
||||
}
|
||||
|
||||
// Return the names (can be none) for a field. Takes exportedness
|
||||
// and JSON tag annotation into account.
|
||||
func nameList(names []*ast.Ident, tag *ast.BasicLit) []string {
|
||||
if names == nil {
|
||||
return nil
|
||||
}
|
||||
l := []string{}
|
||||
for _, name := range names {
|
||||
if ast.IsExported(name.Name) {
|
||||
l = append(l, name.Name)
|
||||
}
|
||||
}
|
||||
if len(l) == 1 && tag != nil {
|
||||
name := jsonName(parseStringLiteral(tag.Value), l[0])
|
||||
if name != "" {
|
||||
return []string{name}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// Parses a top-level sherpadoc section.
|
||||
func parseDoc(apiName, packagePath string) *section {
|
||||
fset := token.NewFileSet()
|
||||
pkgs, firstErr := parser.ParseDir(fset, packagePath, nil, parser.ParseComments)
|
||||
check(firstErr, "parsing code")
|
||||
for _, pkg := range pkgs {
|
||||
docpkg := doc.New(pkg, "", doc.AllDecls)
|
||||
|
||||
for _, t := range docpkg.Types {
|
||||
if t.Name == apiName {
|
||||
par := &parsedPackage{
|
||||
Fset: fset,
|
||||
Path: packagePath,
|
||||
Pkg: pkg,
|
||||
Docpkg: docpkg,
|
||||
Imports: make(map[string]*parsedPackage),
|
||||
}
|
||||
return parseSection(t, par)
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Fatalf("type %q not found", apiName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse a section and its optional subsections, recursively.
|
||||
// t is the type of the struct with the sherpa methods to be parsed.
|
||||
func parseSection(t *doc.Type, pp *parsedPackage) *section {
|
||||
sec := §ion{
|
||||
t.Name,
|
||||
t.Name,
|
||||
strings.TrimSpace(t.Doc),
|
||||
nil,
|
||||
map[string]struct{}{},
|
||||
nil,
|
||||
nil,
|
||||
}
|
||||
|
||||
// make list of methods to parse, sorted by position in file name.
|
||||
methods := make([]*doc.Func, len(t.Methods))
|
||||
copy(methods, t.Methods)
|
||||
sort.Slice(methods, func(i, j int) bool {
|
||||
return methods[i].Decl.Name.NamePos < methods[j].Decl.Name.NamePos
|
||||
})
|
||||
|
||||
for _, fn := range methods {
|
||||
parseMethod(sec, fn, pp)
|
||||
}
|
||||
|
||||
// parse subsections
|
||||
ts := t.Decl.Specs[0].(*ast.TypeSpec)
|
||||
expr := ts.Type
|
||||
st := expr.(*ast.StructType)
|
||||
for _, f := range st.Fields.List {
|
||||
ident, ok := f.Type.(*ast.Ident)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
name := ident.Name
|
||||
if f.Tag != nil {
|
||||
name = reflect.StructTag(parseStringLiteral(f.Tag.Value)).Get("sherpa")
|
||||
}
|
||||
subt := pp.lookupType(ident.Name)
|
||||
if subt == nil {
|
||||
logFatalLinef(pp, ident.Pos(), "subsection %q not found", ident.Name)
|
||||
}
|
||||
subsec := parseSection(subt, pp)
|
||||
subsec.Name = name
|
||||
sec.Sections = append(sec.Sections, subsec)
|
||||
}
|
||||
return sec
|
||||
}
|
||||
|
||||
// Ensure type "t" (used in a field or argument) defined in package pp is parsed
|
||||
// and added to the section.
|
||||
func ensureNamedType(t *doc.Type, sec *section, pp *parsedPackage) {
|
||||
typePath := pp.Path + "." + t.Name
|
||||
if _, have := sec.Typeset[typePath]; have {
|
||||
return
|
||||
}
|
||||
|
||||
tt := &namedType{
|
||||
Name: t.Name,
|
||||
Text: strings.TrimSpace(t.Doc),
|
||||
}
|
||||
// add it early, so self-referencing types can't cause a loop
|
||||
sec.Types = append(sec.Types, tt)
|
||||
sec.Typeset[typePath] = struct{}{}
|
||||
|
||||
ts := t.Decl.Specs[0].(*ast.TypeSpec)
|
||||
if ts.Assign.IsValid() {
|
||||
logFatalLinef(pp, t.Decl.TokPos, "type aliases not yet supported")
|
||||
}
|
||||
|
||||
var gatherFields func(e ast.Expr, typeName string, xpp *parsedPackage)
|
||||
var gatherStructFields func(nt *ast.StructType, typeName string, xpp *parsedPackage)
|
||||
|
||||
gatherFields = func(e ast.Expr, typeName string, xpp *parsedPackage) {
|
||||
switch xt := e.(type) {
|
||||
case *ast.Ident:
|
||||
// Bare type name.
|
||||
tt := xpp.lookupType(xt.Name)
|
||||
if tt == nil {
|
||||
log.Fatalf("could not find type %q used in type %q in package %q", xt.Name, typeName, xpp.Path)
|
||||
}
|
||||
tts := tt.Decl.Specs[0].(*ast.TypeSpec)
|
||||
if ts.Assign.IsValid() {
|
||||
logFatalLinef(xpp, tt.Decl.TokPos, "type aliases not yet supported")
|
||||
}
|
||||
tst, ok := tts.Type.(*ast.StructType)
|
||||
if !ok {
|
||||
logFatalLinef(xpp, tt.Decl.TokPos, "unexpected field type %T", tts.Type)
|
||||
}
|
||||
gatherStructFields(tst, tt.Name, xpp)
|
||||
case *ast.StarExpr:
|
||||
// Field with "*", handle as if without *.
|
||||
gatherFields(xt.X, typeName, xpp)
|
||||
case *ast.SelectorExpr:
|
||||
// With package prefix, lookup the type in the package and gather its fields.
|
||||
dt, nxpp := parseFieldSelector(useSrc{xpp, typeName}, xt)
|
||||
tts := dt.Decl.Specs[0].(*ast.TypeSpec)
|
||||
if ts.Assign.IsValid() {
|
||||
logFatalLinef(nxpp, dt.Decl.TokPos, "type aliases not yet supported")
|
||||
}
|
||||
tst, ok := tts.Type.(*ast.StructType)
|
||||
if !ok {
|
||||
logFatalLinef(nxpp, dt.Decl.TokPos, "unexpected field type %T", tts.Type)
|
||||
}
|
||||
gatherStructFields(tst, dt.Name, nxpp)
|
||||
default:
|
||||
logFatalLinef(xpp, t.Decl.TokPos, "unsupported field with type %T", e)
|
||||
}
|
||||
}
|
||||
|
||||
gatherStructFields = func(nt *ast.StructType, typeName string, xpp *parsedPackage) {
|
||||
for _, f := range nt.Fields.List {
|
||||
if len(f.Names) == 0 {
|
||||
// Embedded field. Treat its fields as if they were included.
|
||||
gatherFields(f.Type, typeName, xpp)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if we need this type. Otherwise we may trip
|
||||
// over an unhandled type that we wouldn't include in
|
||||
// the output (eg due to a struct tag).
|
||||
names := nameList(f.Names, f.Tag)
|
||||
need := false
|
||||
for _, name := range names {
|
||||
if name != "" {
|
||||
need = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !need {
|
||||
continue
|
||||
}
|
||||
|
||||
ff := &field{
|
||||
"",
|
||||
nil,
|
||||
fieldDoc(f),
|
||||
[]*field{},
|
||||
}
|
||||
ff.Typewords = gatherFieldType(t.Name, ff, f.Type, f.Tag, sec, xpp)
|
||||
for _, name := range nameList(f.Names, f.Tag) {
|
||||
nf := &field{}
|
||||
*nf = *ff
|
||||
nf.Name = name
|
||||
tt.Fields = append(tt.Fields, nf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch nt := ts.Type.(type) {
|
||||
case *ast.StructType:
|
||||
tt.Kind = typeStruct
|
||||
gatherStructFields(nt, t.Name, pp)
|
||||
|
||||
case *ast.ArrayType:
|
||||
if ident, ok := nt.Elt.(*ast.Ident); ok && ident.Name == "byte" {
|
||||
tt.Kind = typeBytes
|
||||
} else {
|
||||
logFatalLinef(pp, t.Decl.TokPos, "named type with unsupported element type %T", ts.Type)
|
||||
}
|
||||
|
||||
case *ast.Ident:
|
||||
if strings.HasSuffix(typePath, "sherpa.Int64s") || strings.HasSuffix(typePath, "sherpa.Uint64s") {
|
||||
return
|
||||
}
|
||||
|
||||
tt.Text = t.Doc + ts.Comment.Text()
|
||||
switch nt.Name {
|
||||
case "byte", "int16", "uint16", "int32", "uint32", "int", "uint":
|
||||
tt.Kind = typeInts
|
||||
case "string":
|
||||
tt.Kind = typeStrings
|
||||
default:
|
||||
logFatalLinef(pp, t.Decl.TokPos, "unrecognized type identifier %#v", nt.Name)
|
||||
}
|
||||
|
||||
for _, c := range t.Consts {
|
||||
for _, spec := range c.Decl.Specs {
|
||||
vs, ok := spec.(*ast.ValueSpec)
|
||||
if !ok {
|
||||
logFatalLinef(pp, spec.Pos(), "unsupported non-ast.ValueSpec constant %#v", spec)
|
||||
}
|
||||
if len(vs.Names) != 1 {
|
||||
logFatalLinef(pp, vs.Pos(), "unsupported multiple .Names in %#v", vs)
|
||||
}
|
||||
name := vs.Names[0].Name
|
||||
if len(vs.Values) != 1 {
|
||||
logFatalLinef(pp, vs.Pos(), "unsupported multiple .Values in %#v", vs)
|
||||
}
|
||||
lit, ok := vs.Values[0].(*ast.BasicLit)
|
||||
if !ok {
|
||||
logFatalLinef(pp, vs.Pos(), "unsupported non-ast.BasicLit first .Values %#v", vs)
|
||||
}
|
||||
|
||||
comment := vs.Doc.Text() + vs.Comment.Text()
|
||||
switch lit.Kind {
|
||||
case token.INT:
|
||||
if tt.Kind != typeInts {
|
||||
logFatalLinef(pp, lit.Pos(), "int value for for non-int-enum %q", t.Name)
|
||||
}
|
||||
v, err := strconv.ParseInt(lit.Value, 10, 64)
|
||||
check(err, "parse int literal")
|
||||
iv := struct {
|
||||
Name string
|
||||
Value int
|
||||
Docs string
|
||||
}{name, int(v), strings.TrimSpace(comment)}
|
||||
tt.IntValues = append(tt.IntValues, iv)
|
||||
case token.STRING:
|
||||
if tt.Kind != typeStrings {
|
||||
logFatalLinef(pp, lit.Pos(), "string for non-string-enum %q", t.Name)
|
||||
}
|
||||
v, err := strconv.Unquote(lit.Value)
|
||||
check(err, "unquote literal")
|
||||
sv := struct {
|
||||
Name string
|
||||
Value string
|
||||
Docs string
|
||||
}{name, v, strings.TrimSpace(comment)}
|
||||
tt.StringValues = append(tt.StringValues, sv)
|
||||
default:
|
||||
logFatalLinef(pp, lit.Pos(), "unexpected literal kind %#v", lit.Kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
logFatalLinef(pp, t.Decl.TokPos, "unsupported field/param/return type %T", ts.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// hasOmitEmpty returns whether the tag (may be nil) contains a "json:,omitempty" directive.
func hasOmitEmpty(tag *ast.BasicLit) bool {
	return hasJSONTagValue(tag, "omitempty")
}
|
||||
|
||||
// isCommaString returns whether the tag (may be nil) contains a "json:,string" directive.
|
||||
func isCommaString(tag *ast.BasicLit) bool {
|
||||
return hasJSONTagValue(tag, "string")
|
||||
}
|
||||
|
||||
func hasJSONTagValue(tag *ast.BasicLit, v string) bool {
|
||||
if tag == nil {
|
||||
return false
|
||||
}
|
||||
st := reflect.StructTag(parseStringLiteral(tag.Value))
|
||||
s, ok := st.Lookup("json")
|
||||
if !ok || s == "-" {
|
||||
return false
|
||||
}
|
||||
t := strings.Split(s, ",")
|
||||
for _, e := range t[1:] {
|
||||
if e == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// gatherFieldType returns the sherpa typewords for field f (with Go type
// expression e and optional struct tag fieldTag) of the type named typeName.
// Named types encountered along the way are added to sec. A
// `json:,omitempty` tag makes the type nullable; a `json:,string` tag on a
// 64-bit int selects the string-encoded int64s/uint64s types.
func gatherFieldType(typeName string, f *field, e ast.Expr, fieldTag *ast.BasicLit, sec *section, pp *parsedPackage) typewords {
	nullablePrefix := typewords{}
	if hasOmitEmpty(fieldTag) {
		nullablePrefix = typewords{"nullable"}
	}

	// A -replace rule for this Go type takes precedence over parsing it.
	name := checkReplacedType(useSrc{pp, typeName}, e)
	if name != nil {
		// Don't prepend a second "nullable" if the replacement already has one.
		if name[0] != "nullable" {
			return append(nullablePrefix, name...)
		}
		return name
	}

	switch t := e.(type) {
	case *ast.Ident:
		// A named type from this package, or a builtin.
		tt := pp.lookupType(t.Name)
		if tt != nil {
			ensureNamedType(tt, sec, pp)
			return []string{t.Name}
		}
		commaString := isCommaString(fieldTag)
		name := t.Name
		switch name {
		case "byte":
			name = "uint8"
		case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float32", "float64", "string", "any":
			// Builtins mapping directly to sherpa basic types.
		case "int64", "uint64":
			// With `json:,string`, 64-bit ints are transferred as strings.
			if commaString {
				name += "s"
			}
		case "int", "uint":
			// Platform-sized ints are documented as 32-bit.
			name += "32"
		default:
			logFatalLinef(pp, t.Pos(), "unsupported field type %q used in type %q in package %q", name, typeName, pp.Path)
		}
		if commaString && name != "int64s" && name != "uint64s" {
			logFatalLinef(pp, t.Pos(), "unsupported tag `json:,\"string\"` for non-64bit int in %s.%s", typeName, f.Name)
		}
		return append(nullablePrefix, name)
	case *ast.ArrayType:
		// Element type is gathered recursively, without the field tag.
		return append(nullablePrefix, append([]string{"[]"}, gatherFieldType(typeName, f, t.Elt, nil, sec, pp)...)...)
	case *ast.MapType:
		// Key type is validated (and its named types registered) but only the
		// value type ends up in the typewords.
		_ = gatherFieldType(typeName, f, t.Key, nil, sec, pp)
		vt := gatherFieldType(typeName, f, t.Value, nil, sec, pp)
		return append(nullablePrefix, append([]string{"{}"}, vt...)...)
	case *ast.InterfaceType:
		// If we export an interface as an "any" type, we want to make sure it's intended.
		// Require the user to be explicit with an empty interface.
		if t.Methods != nil && len(t.Methods.List) > 0 {
			logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t)
		}
		return append(nullablePrefix, "any")
	case *ast.StarExpr:
		// Pointer: make the pointed-to type nullable (once).
		tw := gatherFieldType(typeName, f, t.X, fieldTag, sec, pp)
		if tw[0] != "nullable" {
			tw = append([]string{"nullable"}, tw...)
		}
		return tw
	case *ast.SelectorExpr:
		// Package-qualified type, eg "time.Time".
		return append(nullablePrefix, parseSelector(t, typeName, sec, pp))
	}
	logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v for struct %q field %q in gatherFieldType", e, typeName, f.Name)
	return nil
}
|
||||
|
||||
// parseArgType returns the sherpa typewords for a function parameter or
// return type expression e, registering any named types it references in sec.
// Unlike gatherFieldType there is no struct tag, so no omitempty/string
// handling; "error" is allowed here and validated by the caller.
func parseArgType(e ast.Expr, sec *section, pp *parsedPackage) typewords {
	// A -replace rule for this Go type takes precedence over parsing it.
	name := checkReplacedType(useSrc{pp, sec.Name}, e)
	if name != nil {
		return name
	}

	switch t := e.(type) {
	case *ast.Ident:
		// A named type from this package, or a builtin.
		tt := pp.lookupType(t.Name)
		if tt != nil {
			ensureNamedType(tt, sec, pp)
			return []string{t.Name}
		}
		name := t.Name
		switch name {
		case "byte":
			name = "uint8"
		case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "float32", "float64", "string", "any":
			// Builtins mapping directly to sherpa basic types.
		case "int", "uint":
			// Platform-sized ints are documented as 32-bit.
			name += "32"
		case "error":
			// allowed here, checked if in right location by caller
		default:
			logFatalLinef(pp, t.Pos(), "unsupported arg type %q", name)
		}
		return []string{name}
	case *ast.ArrayType:
		return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...)
	case *ast.Ellipsis:
		// Ellipsis parameters to a function must be passed as an array, so document it that way.
		return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...)
	case *ast.MapType:
		// Key type is validated (and its named types registered) but only the
		// value type ends up in the typewords.
		_ = parseArgType(t.Key, sec, pp)
		vt := parseArgType(t.Value, sec, pp)
		return append([]string{"{}"}, vt...)
	case *ast.InterfaceType:
		// If we export an interface as an "any" type, we want to make sure it's intended.
		// Require the user to be explicit with an empty interface.
		if t.Methods != nil && len(t.Methods.List) > 0 {
			logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t)
		}
		return []string{"any"}
	case *ast.StarExpr:
		// Pointer types become nullable.
		return append([]string{"nullable"}, parseArgType(t.X, sec, pp)...)
	case *ast.SelectorExpr:
		// Package-qualified type, eg "time.Time".
		return []string{parseSelector(t, sec.TypeName, sec, pp)}
	}
	logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v in parseArgType", e)
	return nil
}
|
||||
|
||||
// Parse the selector of a field, returning the type and the parsed package it exists in. This cannot be a builtin type.
// Exits the program (via u.Fatalf) when the package or type cannot be resolved.
func parseFieldSelector(u useSrc, t *ast.SelectorExpr) (*doc.Type, *parsedPackage) {
	packageIdent, ok := t.X.(*ast.Ident)
	if !ok {
		u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X")
	}
	pkgName := packageIdent.Name
	typeName := t.Sel.Name

	// Resolve the package prefix to an import path via the file defining u.Name.
	importPath := u.lookupPackageImportPath(pkgName)
	if importPath == "" {
		u.Fatalf(t.Pos(), "cannot find source for type %q that references package %q (perhaps try -replace)", u, pkgName)
	}

	// Parse (or reuse the cached parse of) the referenced package, then look
	// up the type there.
	opp := u.Ppkg.ensurePackageParsed(importPath)
	tt := opp.lookupType(typeName)
	if tt == nil {
		u.Fatalf(t.Pos(), "could not find type %q in package %q", typeName, importPath)
	}
	return tt, opp
}
|
||||
|
||||
// parseSelector resolves a package-qualified type (eg "pkg.Type") used as a
// field or argument type in the type named srcTypeName, ensures the
// referenced named type is added to sec, and returns its type name. A few
// well-known types map to sherpa primitives instead of named types.
func parseSelector(t *ast.SelectorExpr, srcTypeName string, sec *section, pp *parsedPackage) string {
	packageIdent, ok := t.X.(*ast.Ident)
	if !ok {
		logFatalLinef(pp, t.Pos(), "unexpected non-ident for SelectorExpr.X")
	}
	pkgName := packageIdent.Name
	typeName := t.Sel.Name

	// Special cases that map directly to sherpa primitive types.
	if pkgName == "time" && typeName == "Time" {
		return "timestamp"
	}
	if pkgName == "sherpa" {
		switch typeName {
		case "Int64s":
			return "int64s"
		case "Uint64s":
			return "uint64s"
		}
	}

	importPath := pp.lookupPackageImportPath(srcTypeName, pkgName)
	if importPath == "" {
		logFatalLinef(pp, t.Pos(), "cannot find source for %q (perhaps try -replace)", fmt.Sprintf("%s.%s", pkgName, typeName))
	}

	// Parse (or reuse the cached parse of) the referenced package, look up
	// the type there and register it in the section.
	opp := pp.ensurePackageParsed(importPath)
	tt := opp.lookupType(typeName)
	if tt == nil {
		logFatalLinef(pp, t.Pos(), "could not find type %q in package %q", typeName, importPath)
	}
	ensureNamedType(tt, sec, opp)
	return typeName
}
|
||||
|
||||
// replacement is one type substitution configured with the -replace flag:
// occurrences of the Go type "original" are documented as "target" instead of
// being parsed.
type replacement struct {
	original string    // a Go type, eg "pkg.Type" or "*pkg.Type"
	target   typewords // sherpa typewords emitted instead of parsing original
}
|
||||
|
||||
var _replacements []replacement
|
||||
|
||||
func typeReplacements() []replacement {
|
||||
if _replacements != nil {
|
||||
return _replacements
|
||||
}
|
||||
|
||||
_replacements = []replacement{}
|
||||
for _, repl := range strings.Split(*replace, ",") {
|
||||
if repl == "" {
|
||||
continue
|
||||
}
|
||||
tokens := strings.Split(repl, " ")
|
||||
if len(tokens) < 2 {
|
||||
log.Fatalf("bad replacement %q, must have at least two tokens, space-separated", repl)
|
||||
}
|
||||
r := replacement{tokens[0], tokens[1:]}
|
||||
_replacements = append(_replacements, r)
|
||||
}
|
||||
return _replacements
|
||||
}
|
||||
|
||||
// Use of a type Name from package Ppkg. Used to look up references from that
// location (the file where the type is defined, with its imports) for a given Go
// ast.
type useSrc struct {
	Ppkg *parsedPackage
	Name string
}

// lookupPackageImportPath returns the import path that pkgName refers to in
// the file defining u.Name, or "" if not found.
func (u useSrc) lookupPackageImportPath(pkgName string) string {
	return u.Ppkg.lookupPackageImportPath(u.Name, pkgName)
}

// String returns the fully-qualified name, eg "path/to/pkg.Type".
func (u useSrc) String() string {
	return fmt.Sprintf("%s.%s", u.Ppkg.Path, u.Name)
}

// Fatalf logs a fatal error with file:line information for tok in u.Ppkg.
func (u useSrc) Fatalf(tok token.Pos, format string, args ...interface{}) {
	logFatalLinef(u.Ppkg, tok, format, args...)
}
|
||||
|
||||
// Return a go type name, eg "*time.Time".
// This function does not parse the types itself, because it would mean they could
// be added to the sherpadoc output even if they aren't otherwise used (due to
// replacement).
func goTypeName(u useSrc, e ast.Expr) string {
	switch t := e.(type) {
	case *ast.Ident:
		return t.Name
	case *ast.ArrayType:
		return "[]" + goTypeName(u, t.Elt)
	case *ast.Ellipsis:
		// Ellipsis parameters to a function must be passed as an array, so document it that way.
		return "[]" + goTypeName(u, t.Elt)
	case *ast.MapType:
		return fmt.Sprintf("map[%s]%s", goTypeName(u, t.Key), goTypeName(u, t.Value))
	case *ast.InterfaceType:
		return "interface{}"
	case *ast.StarExpr:
		return "*" + goTypeName(u, t.X)
	case *ast.SelectorExpr:
		packageIdent, ok := t.X.(*ast.Ident)
		if !ok {
			u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X")
		}
		pkgName := packageIdent.Name
		typeName := t.Sel.Name

		// Prefer the full import path so -replace rules can use it; fall back
		// to the bare package name when the import cannot be resolved.
		importPath := u.lookupPackageImportPath(pkgName)
		if importPath != "" {
			return fmt.Sprintf("%s.%s", importPath, typeName)
		}
		return fmt.Sprintf("%s.%s", pkgName, typeName)
		// todo: give proper error message for *ast.StructType
	}
	u.Fatalf(e.Pos(), "unimplemented ast.Expr %#v in goTypeName", e)
	return ""
}
|
||||
|
||||
func checkReplacedType(u useSrc, e ast.Expr) typewords {
|
||||
repls := typeReplacements()
|
||||
if len(repls) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
name := goTypeName(u, e)
|
||||
return replacementType(repls, name)
|
||||
}
|
||||
|
||||
func replacementType(repls []replacement, name string) typewords {
|
||||
for _, repl := range repls {
|
||||
if repl.original == name {
|
||||
return repl.target
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensures the package for importPath has been parsed at least once, and return it.
// Parsed packages are cached in pp.Imports. Fatal on any load/parse error.
func (pp *parsedPackage) ensurePackageParsed(importPath string) *parsedPackage {
	r := pp.Imports[importPath]
	if r != nil {
		return r
	}

	var localPath string
	var astPkg *ast.Package
	var fset *token.FileSet

	// If dependencies are vendored, we load packages from vendor/. This is typically
	// faster than using package.Load (the fallback), which may spawn commands.
	// For me, while testing, for loading a simple package from the same module goes
	// from 50-100 ms to 1-5ms. Loading "net" from 200ms to 65ms.

	if gomodFile != nil {
		// Map the import path to a directory: the module root, a subdirectory
		// of the module, the vendor directory, or (fallback) GOROOT.
		if importPath == gomodFile.Module.Mod.Path {
			localPath = gomodDir
		} else if strings.HasPrefix(importPath, gomodFile.Module.Mod.Path+"/") {
			localPath = filepath.Join(gomodDir, strings.TrimPrefix(importPath, gomodFile.Module.Mod.Path+"/"))
		} else {
			p := filepath.Join(gomodDir, "vendor", importPath)
			if _, err := os.Stat(p); err == nil {
				localPath = p
			} else {
				// Not vendored; assume a standard-library package.
				localPath = filepath.Join(runtime.GOROOT(), "src", importPath)
			}
		}

		fset = token.NewFileSet()
		astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments|parser.DeclarationErrors)
		check(err, "parsing go files from "+localPath)
		for name, pkg := range astPkgs {
			// Skip test packages (eg "foo_test") living in the same directory.
			if strings.HasSuffix(name, "_test") {
				continue
			}
			if astPkg != nil {
				log.Fatalf("loading package %q: multiple packages found", importPath)
			}
			astPkg = pkg
		}
	} else {
		// Fallback: ask the go tool where the package lives, then parse that
		// directory ourselves (packages.Load only provides file names here).
		config := &packages.Config{
			Mode: packages.NeedName | packages.NeedFiles,
		}
		pkgs, err := packages.Load(config, importPath)
		check(err, "loading package")
		if len(pkgs) != 1 {
			log.Fatalf("loading package %q: got %d packages, expected 1", importPath, len(pkgs))
		}
		pkg := pkgs[0]
		if len(pkg.GoFiles) == 0 {
			log.Fatalf("loading package %q: no go files found", importPath)
		}

		fset = token.NewFileSet()
		localPath = filepath.Dir(pkg.GoFiles[0])
		astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments)
		check(err, "parsing go files from directory")
		var ok bool
		astPkg, ok = astPkgs[pkg.Name]
		if !ok {
			log.Fatalf("loading package %q: could not find astPkg for %q", importPath, pkg.Name)
		}
	}

	docpkg := doc.New(astPkg, "", doc.AllDecls|doc.PreserveAST)

	npp := &parsedPackage{
		Fset:    fset,
		Path:    localPath,
		Pkg:     astPkg,
		Docpkg:  docpkg,
		Imports: make(map[string]*parsedPackage),
	}
	// Cache so future lookups for the same import path are free.
	pp.Imports[importPath] = npp
	return npp
}
|
||||
|
||||
// LookupPackageImportPath returns the import/package path for pkgName as used as
|
||||
// used in the type named typeName.
|
||||
func (pp *parsedPackage) lookupPackageImportPath(typeName, pkgName string) string {
|
||||
file := pp.lookupTypeFile(typeName)
|
||||
for _, imp := range file.Imports {
|
||||
if imp.Name != nil && imp.Name.Name == pkgName || imp.Name == nil && (parseStringLiteral(imp.Path.Value) == pkgName || strings.HasSuffix(parseStringLiteral(imp.Path.Value), "/"+pkgName)) {
|
||||
return parseStringLiteral(imp.Path.Value)
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// LookupTypeFile returns the go source file that containst he definition of the type named typeName.
|
||||
func (pp *parsedPackage) lookupTypeFile(typeName string) *ast.File {
|
||||
for _, file := range pp.Pkg.Files {
|
||||
for _, decl := range file.Decls {
|
||||
switch d := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
for _, spec := range d.Specs {
|
||||
switch s := spec.(type) {
|
||||
case *ast.TypeSpec:
|
||||
if s.Name.Name == typeName {
|
||||
return file
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Fatalf("could not find type %q", fmt.Sprintf("%s.%s", pp.Path, typeName))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Populate "params" with the arguments from "fields", which are function parameters or return type.
// A trailing "error" return is validated and stripped: it is part of the Go
// signature but not of the sherpa function documentation.
func parseArgs(params *[]sherpadoc.Arg, fields *ast.FieldList, sec *section, pp *parsedPackage, isParams bool) {
	if fields == nil {
		return
	}
	addParam := func(name string, tw typewords) {
		param := sherpadoc.Arg{Name: name, Typewords: tw}
		*params = append(*params, param)
	}
	for _, f := range fields.List {
		typ := parseArgType(f.Type, sec, pp)
		// Handle named params. Can be both arguments to a function or return types.
		for _, name := range f.Names {
			addParam(name.Name, typ)
		}
		// Return types often don't have a name, don't forget them.
		if len(f.Names) == 0 {
			addParam("", typ)
		}
	}

	// "error" may only appear as the very last return value (never as a
	// parameter); when found there, drop it from the documented returns.
	for i, p := range *params {
		if p.Typewords[len(p.Typewords)-1] != "error" {
			continue
		}
		if isParams || i != len(*params)-1 {
			logFatalLinef(pp, fields.Pos(), "can only have error type as last return value")
		}
		// NOTE: this "pp" shadows the *parsedPackage parameter for the rest
		// of this iteration; it holds the params slice.
		pp := *params
		*params = pp[:len(pp)-1]
	}
}
|
||||
|
||||
// adjustFunctionName maps a Go method name to the sherpa function name,
// controlled by the -adjust-function-names flag: "" lowercases the first
// character, "none" keeps the name as-is, "lowerWord" lowercases the leading
// run of uppercase characters (eg "FooBar" -> "fooBar").
func adjustFunctionName(s string) string {
	switch *adjustFunctionNames {
	case "":
		// NOTE(review): s[:1] is the first byte, not the first rune; a
		// non-ASCII leading character would not be lowercased correctly —
		// assumes ASCII Go identifiers.
		return strings.ToLower(s[:1]) + s[1:]
	case "none":
		return s
	case "lowerWord":
		r := ""
		for i, c := range s {
			lc := unicode.ToLower(c)
			if lc == c {
				// First character that is already lowercase (or caseless):
				// keep the rest of the string unchanged.
				r += s[i:]
				break
			}
			r += string(lc)
		}
		return r
	default:
		panic(fmt.Sprintf("bad value for flag adjust-function-names: %q", *adjustFunctionNames))
	}
}
|
||||
|
||||
// ParseMethod ensures the function fn from package pp ends up in section sec, with parameters/return named types filled in.
func parseMethod(sec *section, fn *doc.Func, pp *parsedPackage) {
	f := &function{
		Name:    adjustFunctionName(fn.Name),
		Text:    fn.Doc,
		Params:  []sherpadoc.Arg{},
		Returns: []sherpadoc.Arg{},
	}

	// If first function parameter is context.Context, we skip it in the documentation.
	// The sherpa handler automatically fills it with the http request context when called.
	// NOTE: this mutates the function's AST in place (params.List is resliced).
	params := fn.Decl.Type.Params
	if params != nil && len(params.List) > 0 && len(params.List[0].Names) == 1 && goTypeName(useSrc{pp, sec.Name}, params.List[0].Type) == "context.Context" {
		params.List = params.List[1:]
	}
	isParams := true
	parseArgs(&f.Params, params, sec, pp, isParams)

	// Returns are parsed with isParams=false so a trailing "error" is allowed
	// (and stripped) by parseArgs.
	isParams = false
	parseArgs(&f.Returns, fn.Decl.Type.Results, sec, pp, isParams)
	sec.Functions = append(sec.Functions, f)
}
|
85
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go
generated
vendored
Normal file
85
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/mjl-/sherpadoc"
|
||||
)
|
||||
|
||||
func sherpaSection(sec *section) *sherpadoc.Section {
|
||||
doc := &sherpadoc.Section{
|
||||
Name: sec.Name,
|
||||
Docs: sec.Text,
|
||||
Functions: []*sherpadoc.Function{},
|
||||
Sections: []*sherpadoc.Section{},
|
||||
Structs: []sherpadoc.Struct{},
|
||||
Ints: []sherpadoc.Ints{},
|
||||
Strings: []sherpadoc.Strings{},
|
||||
}
|
||||
for _, t := range sec.Types {
|
||||
switch t.Kind {
|
||||
case typeStruct:
|
||||
tt := sherpadoc.Struct{
|
||||
Name: t.Name,
|
||||
Docs: t.Text,
|
||||
Fields: []sherpadoc.Field{},
|
||||
}
|
||||
for _, f := range t.Fields {
|
||||
ff := sherpadoc.Field{
|
||||
Name: f.Name,
|
||||
Docs: f.Doc,
|
||||
Typewords: f.Typewords,
|
||||
}
|
||||
tt.Fields = append(tt.Fields, ff)
|
||||
}
|
||||
doc.Structs = append(doc.Structs, tt)
|
||||
case typeInts:
|
||||
e := sherpadoc.Ints{
|
||||
Name: t.Name,
|
||||
Docs: strings.TrimSpace(t.Text),
|
||||
Values: t.IntValues,
|
||||
}
|
||||
doc.Ints = append(doc.Ints, e)
|
||||
case typeStrings:
|
||||
e := sherpadoc.Strings{
|
||||
Name: t.Name,
|
||||
Docs: strings.TrimSpace(t.Text),
|
||||
Values: t.StringValues,
|
||||
}
|
||||
doc.Strings = append(doc.Strings, e)
|
||||
case typeBytes:
|
||||
// todo: hack. find proper way to docment them. better for larger functionality: add generic support for lists of types. for now we'll fake this being a string...
|
||||
e := sherpadoc.Strings{
|
||||
Name: t.Name,
|
||||
Docs: strings.TrimSpace(t.Text),
|
||||
Values: []struct{Name string; Value string; Docs string}{},
|
||||
}
|
||||
doc.Strings = append(doc.Strings, e)
|
||||
default:
|
||||
panic("missing case")
|
||||
}
|
||||
}
|
||||
for _, fn := range sec.Functions {
|
||||
// Ensure returns always have a name. Go can leave them nameless.
|
||||
// Either they all have names or they don't, so the names we make up will never clash.
|
||||
for i := range fn.Returns {
|
||||
if fn.Returns[i].Name == "" {
|
||||
fn.Returns[i].Name = fmt.Sprintf("r%d", i)
|
||||
}
|
||||
}
|
||||
|
||||
f := &sherpadoc.Function{
|
||||
Name: fn.Name,
|
||||
Docs: strings.TrimSpace(fn.Text),
|
||||
Params: fn.Params,
|
||||
Returns: fn.Returns,
|
||||
}
|
||||
doc.Functions = append(doc.Functions, f)
|
||||
}
|
||||
for _, subsec := range sec.Sections {
|
||||
doc.Sections = append(doc.Sections, sherpaSection(subsec))
|
||||
}
|
||||
doc.Docs = strings.TrimSpace(doc.Docs)
|
||||
return doc
|
||||
}
|
84
vendor/github.com/mjl-/sherpadoc/sherpadoc.go
generated
vendored
Normal file
84
vendor/github.com/mjl-/sherpadoc/sherpadoc.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
// Package sherpadoc contains types for reading and writing documentation for sherpa API's.
package sherpadoc

const (
	// SherpadocVersion is the sherpadoc version generated by this command.
	SherpadocVersion = 1
)

// Section represents documentation about a Sherpa API section, as returned by the "_docs" function.
type Section struct {
	Name      string      // Name of an API section.
	Docs      string      // Explanation of the API in text or markdown.
	Functions []*Function // Functions in this section.
	Sections  []*Section  // Subsections, each with their own documentation.
	Structs   []Struct    // Structs as named types.
	Ints      []Ints      // Int enums as named types.
	Strings   []Strings   // String enums used as named types.

	Version          string `json:",omitempty"` // Version of this API, only relevant for the top-level section of an API. Typically filled in by server at startup.
	SherpaVersion    int    // Version of sherpa this API implements. Currently at 0. Typically filled in by server at startup.
	SherpadocVersion int    `json:",omitempty"` // Version of the sherpadoc format. Currently at 1, the first defined version. Only relevant for the top-level section of an API.
}

// Function contains the documentation for a single function.
type Function struct {
	Name    string // Name of the function.
	Docs    string // Text or markdown, describing the function, its parameters, return types and possible errors.
	Params  []Arg
	Returns []Arg
}

// Arg is the name and type of a function parameter or return value.
//
// Production rules:
//
//	basictype := "bool" | "int8", "uint8" | "int16" | "uint16" | "int32" | "uint32" | "int64" | "uint64" | "int64s" | "uint64s" | "float32" | "float64" | "string" | "timestamp"
//	array := "[]"
//	map := "{}"
//	identifier := [a-zA-Z][a-zA-Z0-9]*
//	type := "nullable"? ("any" | basictype | identifier | array type | map type)
//
// It is not possible to have inline structs in an Arg. Those must be encoded as a
// named type.
type Arg struct {
	Name      string   // Name of the argument.
	Typewords []string // Typewords is an array of tokens describing the type.
}

// Struct is a named compound type.
type Struct struct {
	Name   string
	Docs   string
	Fields []Field
}

// Field is a single field of a struct type.
// The type can reference another named type.
type Field struct {
	Name      string
	Docs      string
	Typewords []string
}

// Ints is a type representing an enum with integers as values.
type Ints struct {
	Name   string
	Docs   string
	Values []struct {
		Name  string
		Value int
		Docs  string
	}
}

// Strings is a type representing an enum with strings as values.
type Strings struct {
	Name   string
	Docs   string
	Values []struct {
		Name  string
		Value string
		Docs  string
	}
}
|
8
vendor/github.com/mjl-/sherpaprom/LICENSE.md
generated
vendored
Normal file
8
vendor/github.com/mjl-/sherpaprom/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
Copyright 2017 Irias Informatiemanagement
|
||||
Copyright 2019 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
13
vendor/github.com/mjl-/sherpaprom/README.md
generated
vendored
Normal file
13
vendor/github.com/mjl-/sherpaprom/README.md
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
# sherpaprom
|
||||
|
||||
Go package with a Prometheus [1] collector for Sherpa APIs [2,3]. It provides a prometheus collector that implements the Collector interface.
|
||||
|
||||
Read the godoc documentation at https://godoc.org/github.com/mjl-/sherpaprom
|
||||
|
||||
[1] Prometheus: https://prometheus.io/
|
||||
[2] Sherpa protocol: https://www.ueber.net/who/mjl/sherpa/
|
||||
[3] Sherpa Go package: https://github.com/mjl-/sherpa
|
||||
|
||||
# LICENSE
|
||||
|
||||
Created by Mechiel Lukkien, originally at Irias, and released under an MIT-license, see LICENSE.md.
|
123
vendor/github.com/mjl-/sherpaprom/collector.go
generated
vendored
Normal file
123
vendor/github.com/mjl-/sherpaprom/collector.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Package sherpaprom provides a collector of statistics for incoming Sherpa requests that are exported over to Prometheus.
|
||||
package sherpaprom
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// Collector implements the Collector interface from the sherpa package.
// It holds the prometheus metrics that are updated for each sherpa request.
type Collector struct {
	requests, errors                              *prometheus.CounterVec // Per function; errors also per code.
	protocolErrors, badFunction, javascript, json prometheus.Counter
	requestDuration                               *prometheus.HistogramVec // Per function.
}
|
||||
|
||||
// NewCollector creates a new collector for the named API.
// Metrics will be labeled with "api".
// The following prometheus metrics are automatically registered on reg, or the default prometheus registerer if reg is nil:
//
//	sherpa_requests_total
//	    calls, per function
//	sherpa_errors_total
//	    error responses, per function,code
//	sherpa_protocol_errors_total
//	    incorrect requests
//	sherpa_bad_function_total
//	    unknown functions called
//	sherpa_javascript_request_total
//	    requests to sherpa.js
//	sherpa_json_requests_total
//	    requests to sherpa.json
//	sherpa_requests_duration_seconds
//	    histogram for .01, .05, .1, .2, .5, 1, 2, 4, 8, 16, per function
func NewCollector(api string, reg prometheus.Registerer) (*Collector, error) {
	if reg == nil {
		reg = prometheus.DefaultRegisterer
	}
	// Every metric carries the API name as a constant label, so several APIs
	// can register their collectors side by side.
	apiLabel := prometheus.Labels{"api": api}
	c := &Collector{
		requests: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name:        "sherpa_requests_total",
			Help:        "Total sherpa requests.",
			ConstLabels: apiLabel,
		}, []string{"function"}),
		errors: prometheus.NewCounterVec(prometheus.CounterOpts{
			Name:        "sherpa_errors_total",
			Help:        "Total sherpa error responses.",
			ConstLabels: apiLabel,
		}, []string{"function", "code"}),
		protocolErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "sherpa_protocol_errors_total",
			Help:        "Total sherpa protocol errors.",
			ConstLabels: apiLabel,
		}),
		badFunction: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "sherpa_bad_function_total",
			Help:        "Total sherpa bad function calls.",
			ConstLabels: apiLabel,
		}),
		// NOTE(review): name is singular "request" unlike the other request
		// counters; kept as-is since renaming a metric breaks existing
		// dashboards/alerts.
		javascript: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "sherpa_javascript_request_total",
			Help:        "Total sherpa.js requests.",
			ConstLabels: apiLabel,
		}),
		json: prometheus.NewCounter(prometheus.CounterOpts{
			Name:        "sherpa_json_requests_total",
			Help:        "Total sherpa.json requests.",
			ConstLabels: apiLabel,
		}),
		requestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Name:        "sherpa_requests_duration_seconds",
			Help:        "Sherpa request duration in seconds.",
			ConstLabels: apiLabel,
			Buckets:     []float64{.01, .05, .1, .2, .5, 1, 2, 4, 8, 16},
		}, []string{"function"}),
	}
	// first returns the first non-nil error. All Register calls below run
	// before first is invoked (arguments are evaluated first), so every
	// metric is attempted and the earliest failure is reported.
	first := func(errors ...error) error {
		for _, err := range errors {
			if err != nil {
				return err
			}
		}
		return nil
	}
	err := first(
		reg.Register(c.requests),
		reg.Register(c.errors),
		reg.Register(c.protocolErrors),
		reg.Register(c.badFunction),
		reg.Register(c.javascript),
		reg.Register(c.json),
		reg.Register(c.requestDuration),
	)
	return c, err
}
|
||||
|
||||
// BadFunction increases counter "sherpa_bad_function_total" by one.
func (c *Collector) BadFunction() {
	c.badFunction.Inc()
}
|
||||
|
||||
// ProtocolError increases counter "sherpa_protocol_errors_total" by one.
func (c *Collector) ProtocolError() {
	c.protocolErrors.Inc()
}
|
||||
|
||||
// JSON increases counter "sherpa_json_requests_total" by one.
func (c *Collector) JSON() {
	c.json.Inc()
}
|
||||
|
||||
// JavaScript increases counter "sherpa_javascript_request_total" by one.
// (The registered metric name is singular "request", see NewCollector.)
func (c *Collector) JavaScript() {
	c.javascript.Inc()
}
|
||||
|
||||
// FunctionCall increases "sherpa_requests_total" by one for the named
// function, observes the call duration in "sherpa_requests_duration_seconds",
// and, if errorCode is non-empty, increases "sherpa_errors_total" for the
// function/code pair.
func (c *Collector) FunctionCall(name string, duration float64, errorCode string) {
	c.requests.WithLabelValues(name).Inc()
	// Only failed calls carry an error code; successful calls skip the error counter.
	if errorCode != "" {
		c.errors.WithLabelValues(name, errorCode).Inc()
	}
	c.requestDuration.WithLabelValues(name).Observe(duration)
}
|
1
vendor/github.com/mjl-/xfmt/.gitignore
generated
vendored
Normal file
1
vendor/github.com/mjl-/xfmt/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
/xfmt
|
7
vendor/github.com/mjl-/xfmt/LICENSE
generated
vendored
Normal file
7
vendor/github.com/mjl-/xfmt/LICENSE
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2019 Mechiel Lukkien
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
26
vendor/github.com/mjl-/xfmt/README.txt
generated
vendored
Normal file
26
vendor/github.com/mjl-/xfmt/README.txt
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
xfmt formats long lines, playing nice with text in code.
|
||||
|
||||
To install:
|
||||
|
||||
go get github.com/mjl-/xfmt/cmd/xfmt
|
||||
|
||||
Xfmt reads from stdin, writes formatted output to stdout.
|
||||
|
||||
Xfmt wraps long lines at 80 characters, configurable through -width. But it
|
||||
counts text width excluding indenting and markup. Fmt formats to a max line
|
||||
length that includes indenting. We don't care about total max line length
|
||||
nowadays, we care about a human readable paragraph, which has a certain text
|
||||
width regardless of indent.
|
||||
|
||||
Xfmt recognizes lines with first non-whitespace of "//" and "#" as line
|
||||
comments, and repeats that prefix on later lines.
|
||||
|
||||
Xfmt does not merge lines if the first non-prefix text starts with
|
||||
punctuation or numbers, e.g. "- item1" or "1. point 1".
|
||||
|
||||
Xfmt does not merge multiple spaces, it assumes you intended what you typed.
|
||||
|
||||
# todo
|
||||
|
||||
- possibly recognize itemized lists in comments and indent the later lines with whitespace
|
||||
- something else
|
207
vendor/github.com/mjl-/xfmt/xfmt.go
generated
vendored
Normal file
207
vendor/github.com/mjl-/xfmt/xfmt.go
generated
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
// Package xfmt reformats text, wrapping it while recognizing comments.
|
||||
package xfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Config tells Format how to reformat text.
type Config struct {
	MaxWidth int // Max width of content (excluding indenting), after which lines are wrapped.
	BreakPrefixes []string // String prefixes that cause a line to break, instead of being merged into the previous line.
}
|
||||
|
||||
// Format reads text from r and writes reformatted text to w, according to
|
||||
// instructions in config. Lines ending with \r\n are formatted with \r\n as well.
|
||||
func Format(w io.Writer, r io.Reader, config Config) error {
|
||||
f := &formatter{
|
||||
in: bufio.NewReader(r),
|
||||
out: bufio.NewWriter(w),
|
||||
config: config,
|
||||
}
|
||||
return f.format()
|
||||
}
|
||||
|
||||
// formatter holds the streaming state for one Format call.
type formatter struct {
	in     *bufio.Reader
	out    *bufio.Writer
	config Config
	// One line of lookahead: filled by peekLine, cleared by consumeLine.
	// curLine is the content without its line ending; curLineend is "\r\n",
	// "\n", or "" for an unterminated final line.
	curLine string
	curLineend string
}

// parseError marks errors raised via check so the recover in format can tell
// them apart from genuine runtime panics.
type parseError error
|
||||
|
||||
func (f *formatter) format() (rerr error) {
|
||||
defer func() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if pe, ok := e.(parseError); ok {
|
||||
rerr = pe
|
||||
} else {
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
line, end := f.gatherLine()
|
||||
if line == "" && end == "" {
|
||||
break
|
||||
}
|
||||
prefix, rem := parseLine(line)
|
||||
for _, s := range f.splitLine(rem) {
|
||||
f.write(prefix)
|
||||
f.write(s)
|
||||
f.write(end)
|
||||
}
|
||||
}
|
||||
return f.out.Flush()
|
||||
|
||||
}
|
||||
|
||||
func (f *formatter) check(err error, action string) {
|
||||
if err != nil {
|
||||
panic(parseError(fmt.Errorf("%s: %s", action, err)))
|
||||
}
|
||||
}
|
||||
|
||||
func (f *formatter) write(s string) {
|
||||
_, err := f.out.Write([]byte(s))
|
||||
f.check(err, "write")
|
||||
}
|
||||
|
||||
func (f *formatter) peekLine() (string, string) {
|
||||
if f.curLine != "" || f.curLineend != "" {
|
||||
return f.curLine, f.curLineend
|
||||
}
|
||||
|
||||
line, err := f.in.ReadString('\n')
|
||||
if err != io.EOF {
|
||||
f.check(err, "read")
|
||||
}
|
||||
if line == "" {
|
||||
return "", ""
|
||||
}
|
||||
if strings.HasSuffix(line, "\r\n") {
|
||||
f.curLine, f.curLineend = line[:len(line)-2], "\r\n"
|
||||
} else if strings.HasSuffix(line, "\n") {
|
||||
f.curLine, f.curLineend = line[:len(line)-1], "\n"
|
||||
} else {
|
||||
f.curLine, f.curLineend = line, ""
|
||||
}
|
||||
return f.curLine, f.curLineend
|
||||
}
|
||||
|
||||
func (f *formatter) consumeLine() {
|
||||
if f.curLine == "" && f.curLineend == "" {
|
||||
panic("bad")
|
||||
}
|
||||
f.curLine = ""
|
||||
f.curLineend = ""
|
||||
}
|
||||
|
||||
// gatherLine reads and consumes input lines, merging consecutive lines that
// share the same prefix into a single logical line (joined with single
// spaces). It returns the merged line including its prefix, and the line
// ending taken from the first merged line. Both results are empty at end of
// input.
func (f *formatter) gatherLine() (string, string) {
	var curLine, curLineend string
	var curPrefix string

	for {
		line, end := f.peekLine()
		if line == "" && end == "" {
			break
		}
		// The line ending of the first gathered line is used for all output lines.
		if curLine == "" {
			curLineend = end
		}
		prefix, rem := parseLine(line)
		if prefix == "" && rem == "" {
			// Empty line: consume it only when nothing has been gathered yet,
			// so it is emitted on its own; otherwise leave it for the next call.
			if curLine == "" {
				f.consumeLine()
			}
			break
		}
		// Stop merging when the prefix changes, the line holds only a prefix,
		// or its content demands a break (BreakPrefixes, list markers, control chars).
		if curLine != "" && (curPrefix != prefix || rem == "" || f.causeBreak(rem)) {
			break
		}
		curPrefix = prefix
		if curLine != "" {
			curLine += " "
		}
		curLine += rem
		f.consumeLine()
		// Control at begin or end of line are not merged.
		if curLine != "" && curLine[len(curLine)-1] < 0x20 {
			break
		}
	}

	return curPrefix + curLine, curLineend
}
|
||||
|
||||
func (f *formatter) causeBreak(s string) bool {
|
||||
c := s[0]
|
||||
if c < 0x20 {
|
||||
return true
|
||||
}
|
||||
for _, ss := range f.config.BreakPrefixes {
|
||||
if strings.HasPrefix(s, ss) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Don't merge lines starting with eg "1. ".
|
||||
for i, c := range s {
|
||||
if c >= '0' && c <= '9' {
|
||||
continue
|
||||
}
|
||||
if i > 0 && c == '.' && strings.HasPrefix(s[i:], ". ") {
|
||||
return true
|
||||
}
|
||||
break
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseLine splits a line into its prefix and its content. The prefix is the
// leading whitespace plus an optional "//" or "#" comment marker and the
// whitespace following that marker; the content is everything after.
func parseLine(s string) (string, string) {
	rem := strings.TrimLeft(s, " \t")
	prefix := s[:len(s)-len(rem)]
	switch {
	case strings.HasPrefix(rem, "//"):
		prefix, rem = prefix+"//", rem[2:]
	case strings.HasPrefix(rem, "#"):
		prefix, rem = prefix+"#", rem[1:]
	}
	content := strings.TrimLeft(rem, " \t")
	prefix += rem[:len(rem)-len(content)]
	return prefix, content
}
|
||||
|
||||
func (f *formatter) splitLine(s string) []string {
|
||||
if len(s) <= f.config.MaxWidth {
|
||||
return []string{s}
|
||||
}
|
||||
|
||||
line := ""
|
||||
r := []string{}
|
||||
for _, w := range strings.Split(s, " ") {
|
||||
if line != "" && len(line)+1+len(w) > f.config.MaxWidth {
|
||||
r = append(r, line)
|
||||
line = w
|
||||
continue
|
||||
}
|
||||
if line != "" {
|
||||
line += " "
|
||||
}
|
||||
line += w
|
||||
}
|
||||
if line != "" {
|
||||
r = append(r, line)
|
||||
}
|
||||
return r
|
||||
}
|
Reference in New Issue
Block a user