Mechiel Lukkien
2023-01-30 14:27:06 +01:00
commit cb229cb6cf
1256 changed files with 491723 additions and 0 deletions

20
vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file

File diff suppressed because it is too large

316
vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file

@@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.5, 0.9, 0.99) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list, reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
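
As a usage sketch of the stream above (targets and input distribution chosen arbitrarily):

```
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median within ±0.05 and the 99th percentile within ±0.001.
	q := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
	for i := 0; i < 100000; i++ {
		q.Insert(rand.Float64())
	}
	fmt.Println("p50:", q.Query(0.5)) // ~0.5 for uniform input
	fmt.Println("p99:", q.Query(0.99))
	// The summary retains far fewer samples than were inserted.
	fmt.Println("retained samples:", len(q.Samples()))
}
```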

22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file

@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

69
vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file

@@ -0,0 +1,69 @@
# xxhash
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
This package provides a straightforward API:
```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```
The `Digest` type implements hash.Hash64. Its key methods are:
```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
This package provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
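For example, hashing a string in one shot and incrementally (input chosen arbitrarily):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	fmt.Printf("%016x\n", xxhash.Sum64String("hello, world"))

	// Digest supports streaming input that arrives in chunks.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64()) // same hash as above
}
```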
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)

235
vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored Normal file

@@ -0,0 +1,235 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total uint64
mem [32]byte
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
var d Digest
d.Reset()
return &d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.total = 0
d.n = 0
}
// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
d.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
nw := writeBlocks(d, b)
b = b[nw:]
}
// Store any remaining partial block.
copy(d.mem[:], b)
d.n = len(b)
return
}
// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
s := d.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
var h uint64
if d.total >= 32 {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = d.v3 + prime5
}
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
const (
magic = "xxh\x06"
marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = appendUint64(b, d.v1)
b = appendUint64(b, d.v2)
b = appendUint64(b, d.v3)
b = appendUint64(b, d.v4)
b = appendUint64(b, d.total)
b = append(b, d.mem[:d.n]...)
b = b[:len(b)+len(d.mem)-d.n]
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic):]
b, d.v1 = consumeUint64(b)
b, d.v2 = consumeUint64(b)
b, d.v3 = consumeUint64(b)
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := u64(b)
return b[8:], x
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
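
A sketch of the MarshalBinary/UnmarshalBinary round trip defined above, e.g. to checkpoint a hash mid-stream (the split point is arbitrary):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("part one")
	state, _ := d.MarshalBinary() // 76 bytes: magic, five uint64 fields, mem

	// Resume in a fresh Digest from the saved state.
	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	d2.WriteString(", part two")

	fmt.Println(d2.Sum64() == xxhash.Sum64String("part one, part two")) // true
}
```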

13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored Normal file

@@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int

215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored Normal file

@@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// DI prime4v
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), DI
// Load slice.
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ DI, AX
CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ SI, BX
JG singles
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize
singlesLoop:
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ SI, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
RET

76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored Normal file

@@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
// d.Write(b)
// return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(d *Digest, b []byte) int {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
return n - len(b)
}

15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file

@@ -0,0 +1,15 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
return d.Write([]byte(s))
}

57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored Normal file

@@ -0,0 +1,57 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"unsafe"
)
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
// d.Write always returns len(s), nil.
// Ignoring the return output and returning these fixed values buys a
// savings of 6 in the inliner's cost model.
return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
s string
cap int
}
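
The effect of the unsafe conversion, sketched for a non-appengine build (input arbitrary):

```
package main

import (
	"fmt"
	"strings"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := strings.Repeat("x", 4096)

	h1 := xxhash.Sum64([]byte(s)) // copies s into a fresh []byte first
	h2 := xxhash.Sum64String(s)   // reinterprets s's bytes in place; no copy

	fmt.Println(h1 == h2) // true
}
```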

3
vendor/github.com/golang/protobuf/AUTHORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

3
vendor/github.com/golang/protobuf/CONTRIBUTORS generated vendored Normal file

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

28
vendor/github.com/golang/protobuf/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright 2010 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

324
vendor/github.com/golang/protobuf/proto/buffer.go generated vendored Normal file

@@ -0,0 +1,324 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
WireVarint = 0
WireFixed32 = 5
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
)
// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
return protowire.AppendVarint(nil, v)
}
// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
return protowire.SizeVarint(v)
}
// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return 0, 0
}
return v, n
}
// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
buf []byte
idx int
deterministic bool
}
// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
return &Buffer{buf: buf}
}
// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
b.deterministic = deterministic
}
// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
b.buf = buf
b.idx = 0
}
// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
b.buf = b.buf[:0]
b.idx = 0
}
// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
return b.buf
}
// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
return b.buf[b.idx:]
}
// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
var err error
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
err := UnmarshalMerge(b.Unread(), m)
b.idx = len(b.buf)
return err
}
type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset() { panic("not implemented") }
func (m *unknownFields) ProtoMessage() { panic("not implemented") }
// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
m := MessageReflect(new(unknownFields))
m.SetUnknown(b)
b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}
// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
b.buf = protowire.AppendVarint(b.buf, v)
return nil
}
// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}
// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}
// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
b.buf = protowire.AppendFixed32(b.buf, uint32(v))
return nil
}
// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
b.buf = protowire.AppendFixed64(b.buf, uint64(v))
return nil
}
// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
b.buf = protowire.AppendBytes(b.buf, v)
return nil
}
// EncodeStringBytes appends a length-prefixed string to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
b.buf = protowire.AppendString(b.buf, v)
return nil
}
// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
var err error
b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
b.buf, err = marshalAppend(b.buf, m, b.deterministic)
return err
}
// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
v, n := protowire.ConsumeVarint(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}
// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
v, err := b.DecodeVarint()
if err != nil {
return 0, err
}
return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}
// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
if n < 0 {
return 0, protowire.ParseError(n)
}
b.idx += n
return uint64(v), nil
}
// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy of the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
v, n := protowire.ConsumeBytes(b.buf[b.idx:])
if n < 0 {
return nil, protowire.ParseError(n)
}
b.idx += n
if alloc {
v = append([]byte(nil), v...)
}
return v, nil
}
// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
v, n := protowire.ConsumeString(b.buf[b.idx:])
if n < 0 {
return "", protowire.ParseError(n)
}
b.idx += n
return v, nil
}
// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
v, err := b.DecodeRawBytes(false)
if err != nil {
return err
}
return UnmarshalMerge(v, m)
}
// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including) the end group marker.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
v, n, err := consumeGroup(b.buf[b.idx:])
if err != nil {
return err
}
b.idx += n
return UnmarshalMerge(v, m)
}
// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and the
// total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
b0 := b
depth := 1 // assume this follows a start group marker
for {
_, wtyp, tagLen := protowire.ConsumeTag(b)
if tagLen < 0 {
return nil, 0, protowire.ParseError(tagLen)
}
b = b[tagLen:]
var valLen int
switch wtyp {
case protowire.VarintType:
_, valLen = protowire.ConsumeVarint(b)
case protowire.Fixed32Type:
_, valLen = protowire.ConsumeFixed32(b)
case protowire.Fixed64Type:
_, valLen = protowire.ConsumeFixed64(b)
case protowire.BytesType:
_, valLen = protowire.ConsumeBytes(b)
case protowire.StartGroupType:
depth++
case protowire.EndGroupType:
depth--
default:
return nil, 0, errors.New("proto: cannot parse reserved wire type")
}
if valLen < 0 {
return nil, 0, protowire.ParseError(valLen)
}
b = b[valLen:]
if depth == 0 {
return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
}
}
}
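
A round-trip sketch of the Buffer primitives above (values arbitrary). Encoding appends to the buffer while decoding consumes from the unread index, so a series of encodes reads back in order:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)       // two bytes: 0xac 0x02
	_ = b.EncodeStringBytes("hi") // length-prefixed: 0x02 'h' 'i'
	// Zigzag interleaves signed values (0,-1,1,-2 -> 0,1,2,3), so -2 encodes as 3.
	_ = b.EncodeZigzag32(uint64(uint32(int32(-2))))

	v, _ := b.DecodeVarint()      // 300
	s, _ := b.DecodeStringBytes() // "hi"
	z, _ := b.DecodeZigzag32()
	fmt.Println(v, s, int32(z), len(b.Unread())) // 300 hi -2 0
}
```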

63
vendor/github.com/golang/protobuf/proto/defaults.go generated vendored Normal file

@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
if m != nil {
setDefaults(MessageReflect(m))
}
}
func setDefaults(m protoreflect.Message) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if !m.Has(fd) {
if fd.HasDefault() && fd.ContainingOneof() == nil {
v := fd.Default()
if fd.Kind() == protoreflect.BytesKind {
v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
}
m.Set(fd, v)
}
continue
}
}
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
setDefaults(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
setDefaults(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
setDefaults(v.Message())
return true
})
}
}
return true
})
}
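
The intended call pattern, as a non-runnable fragment against a hypothetical generated proto2 message pb.Example declaring `optional string name = 1 [default = "anon"];` (pb.Example and GetName are assumptions, not part of this package):

```
m := new(pb.Example)     // hypothetical generated type
proto.SetDefaults(m)     // fills in Name's default since the field is unset
fmt.Println(m.GetName()) // "anon"
```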

113
vendor/github.com/golang/protobuf/proto/deprecated.go generated vendored Normal file

@@ -0,0 +1,113 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding/json"
"errors"
"fmt"
"strconv"
protoV2 "google.golang.org/protobuf/proto"
)
var (
// Deprecated: No longer returned.
ErrNil = errors.New("proto: Marshal called with nil")
// Deprecated: No longer returned.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
// Deprecated: No longer returned.
ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
)
// Deprecated: Do not use.
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
// Deprecated: Do not use.
func GetStats() Stats { return Stats{} }
// Deprecated: Do not use.
func MarshalMessageSet(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSet([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
return nil, errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func UnmarshalMessageSetJSON([]byte, interface{}) error {
return errors.New("proto: not implemented")
}
// Deprecated: Do not use.
func RegisterMessageSetType(Message, int32, string) {}
// Deprecated: Do not use.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// Deprecated: Do not use.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
// Deprecated: Do not use; this type existed for internal use only.
type InternalMessageInfo struct{}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) DiscardUnknown(m Message) {
DiscardUnknown(m)
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Size(m Message) int {
return protoV2.Size(MessageV2(m))
}
// Deprecated: Do not use; this method existed for internal use only.
func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
}
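
EnumName and UnmarshalJSONEnum, while deprecated, are self-contained; in this sketch the maps stand in for the name/value tables that generated code normally provides:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	names := map[int32]string{0: "UNKNOWN", 1: "STARTED"}
	values := map[string]int32{"UNKNOWN": 0, "STARTED": 1}

	fmt.Println(proto.EnumName(names, 1)) // STARTED
	fmt.Println(proto.EnumName(names, 7)) // 7 (unknown values print as digits)

	// Both JSON encodings described above are accepted.
	v1, _ := proto.UnmarshalJSONEnum(values, []byte(`"STARTED"`), "State")
	v2, _ := proto.UnmarshalJSONEnum(values, []byte(`1`), "State")
	fmt.Println(v1, v2) // 1 1
}
```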

58
vendor/github.com/golang/protobuf/proto/discard.go generated vendored Normal file

@@ -0,0 +1,58 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"google.golang.org/protobuf/reflect/protoreflect"
)
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
func DiscardUnknown(m Message) {
if m != nil {
discardUnknown(MessageReflect(m))
}
}
func discardUnknown(m protoreflect.Message) {
m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
switch {
// Handle singular message.
case fd.Cardinality() != protoreflect.Repeated:
if fd.Message() != nil {
discardUnknown(m.Get(fd).Message())
}
// Handle list of messages.
case fd.IsList():
if fd.Message() != nil {
ls := m.Get(fd).List()
for i := 0; i < ls.Len(); i++ {
discardUnknown(ls.Get(i).Message())
}
}
// Handle map of messages.
case fd.IsMap():
if fd.MapValue().Message() != nil {
ms := m.Get(fd).Map()
ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
discardUnknown(v.Message())
return true
})
}
}
return true
})
// Discard unknown fields.
if len(m.GetUnknown()) > 0 {
m.SetUnknown(nil)
}
}
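
A runnable sketch of the effect: emptypb.Empty declares no fields, so any input bytes land in its unknown-field section and survive a marshal round trip until discarded:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Field 1, varint 5: two bytes that Empty can only store as unknown.
	m := new(emptypb.Empty)
	_ = proto.Unmarshal([]byte{0x08, 0x05}, m)

	before, _ := proto.Marshal(m) // still carries the unknown field
	proto.DiscardUnknown(m)
	after, _ := proto.Marshal(m)
	fmt.Println(len(before), len(after)) // 2 0
}
```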

356
vendor/github.com/golang/protobuf/proto/extensions.go generated vendored Normal file

@@ -0,0 +1,356 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"errors"
"fmt"
"reflect"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
type (
// ExtensionDesc represents an extension descriptor and
// is used to interact with an extension field in a message.
//
// Variables of this type are generated in code by protoc-gen-go.
ExtensionDesc = protoimpl.ExtensionInfo
// ExtensionRange represents a range of message extensions.
// Used in code generated by protoc-gen-go.
ExtensionRange = protoiface.ExtensionRangeV1
// Deprecated: Do not use; this is an internal type.
Extension = protoimpl.ExtensionFieldV1
// Deprecated: Do not use; this is an internal type.
XXX_InternalExtensions = protoimpl.ExtensionFields
)
// ErrMissingExtension is returned by GetExtension when the extension is not present.
var ErrMissingExtension = errors.New("proto: missing extension")
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
// HasExtension reports whether the extension field is present in m
// either as an explicitly populated field or as an unknown field.
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return false
}
// Check whether any populated known field matches the field number.
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
has = mr.Has(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
has = int32(fd.Number()) == xt.Field
return !has
})
}
// Check whether any unknown field matches the field number.
for b := mr.GetUnknown(); !has && len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
has = int32(num) == xt.Field
b = b[n:]
}
return has
}
// ClearExtension removes the extension field from m
// either as an explicitly populated field or as an unknown field.
func ClearExtension(m Message, xt *ExtensionDesc) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
xtd := xt.TypeDescriptor()
if isValidExtension(mr.Descriptor(), xtd) {
mr.Clear(xtd)
} else {
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if int32(fd.Number()) == xt.Field {
mr.Clear(fd)
return false
}
return true
})
}
clearUnknown(mr, fieldNum(xt.Field))
}
// ClearAllExtensions clears all extensions from m.
// This includes populated fields and unknown fields in the extension range.
func ClearAllExtensions(m Message) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
if fd.IsExtension() {
mr.Clear(fd)
}
return true
})
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
}
// GetExtension retrieves a proto2 extended field from m.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes for the extension field.
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Retrieve the unknown fields for this extension field.
var bo protoreflect.RawFields
for bi := mr.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if int32(num) == xt.Field {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
// For type incomplete descriptors, only retrieve the unknown fields.
if xt.ExtensionType == nil {
return []byte(bo), nil
}
// If the extension field only exists as unknown fields, unmarshal it.
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
if !mr.Has(xtd) && len(bo) > 0 {
m2 := mr.New()
if err := (proto.UnmarshalOptions{
Resolver: extensionResolver{xt},
}.Unmarshal(bo, m2.Interface())); err != nil {
return nil, err
}
if m2.Has(xtd) {
mr.Set(xtd, m2.Get(xtd))
clearUnknown(mr, fieldNum(xt.Field))
}
}
// Check whether the message has the extension field set or a default.
var pv protoreflect.Value
switch {
case mr.Has(xtd):
pv = mr.Get(xtd)
case xtd.HasDefault():
pv = xtd.Default()
default:
return nil, ErrMissingExtension
}
v := xt.InterfaceOf(pv)
rv := reflect.ValueOf(v)
if isScalarKind(rv.Kind()) {
rv2 := reflect.New(rv.Type())
rv2.Elem().Set(rv)
v = rv2.Interface()
}
return v, nil
}
// extensionResolver is a custom extension resolver that stores a single
// extension type that takes precedence over the global registry.
type extensionResolver struct{ xt protoreflect.ExtensionType }
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
return r.xt, nil
}
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
// GetExtensions returns a list of the extensions values present in m,
// corresponding with the provided list of extension descriptors, xts.
// If an extension is missing in m, the corresponding value is nil.
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return nil, errNotExtendable
}
vs := make([]interface{}, len(xts))
for i, xt := range xts {
v, err := GetExtension(m, xt)
if err != nil {
if err == ErrMissingExtension {
continue
}
return vs, err
}
vs[i] = v
}
return vs, nil
}
// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return errNotExtendable
}
rv := reflect.ValueOf(v)
if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
}
if rv.Kind() == reflect.Ptr {
if rv.IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
}
if isScalarKind(rv.Elem().Kind()) {
v = rv.Elem().Interface()
}
}
xtd := xt.TypeDescriptor()
if !isValidExtension(mr.Descriptor(), xtd) {
return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
}
mr.Set(xtd, xt.ValueOf(v))
clearUnknown(mr, fieldNum(xt.Field))
return nil
}
// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return
}
// Verify that the raw field is valid.
for b0 := b; len(b0) > 0; {
num, _, n := protowire.ConsumeField(b0)
if int32(num) != fnum {
panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
}
b0 = b0[n:]
}
ClearExtension(m, &ExtensionDesc{Field: fnum})
mr.SetUnknown(append(mr.GetUnknown(), b...))
}
// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// unknown fields of m that are in the extension range.
// For the latter case, a type-incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
return nil, errNotExtendable
}
// Collect a set of known extension descriptors.
extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
xt := fd.(protoreflect.ExtensionTypeDescriptor)
if xd, ok := xt.Type().(*ExtensionDesc); ok {
extDescs[fd.Number()] = xd
}
}
return true
})
// Collect a set of unknown extension descriptors.
extRanges := mr.Descriptor().ExtensionRanges()
for b := mr.GetUnknown(); len(b) > 0; {
num, _, n := protowire.ConsumeField(b)
if extRanges.Has(num) && extDescs[num] == nil {
extDescs[num] = nil
}
b = b[n:]
}
// Transpose the set of descriptors into a list.
var xts []*ExtensionDesc
for num, xt := range extDescs {
if xt == nil {
xt = &ExtensionDesc{Field: int32(num)}
}
xts = append(xts, xt)
}
return xts, nil
}
// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}
// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
switch k {
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
return true
default:
return false
}
}
// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
Has(protoreflect.FieldNumber) bool
}) {
var bo protoreflect.RawFields
for bi := m.GetUnknown(); len(bi) > 0; {
num, _, n := protowire.ConsumeField(bi)
if !remover.Has(num) {
bo = append(bo, bi[:n]...)
}
bi = bi[n:]
}
if bi := m.GetUnknown(); len(bi) != len(bo) {
m.SetUnknown(bo)
}
}
type fieldNum protoreflect.FieldNumber
func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
return protoreflect.FieldNumber(n1) == n2
}
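
Tying the pieces together, a fragment using a hypothetical generated extension pb.E_Priority (an *int32 extension of pb.Request; both names are assumptions, not real declarations):

```
req := new(pb.Request) // hypothetical extendable proto2 message
if err := proto.SetExtension(req, pb.E_Priority, proto.Int32(7)); err != nil {
	panic(err)
}
if proto.HasExtension(req, pb.E_Priority) {
	v, _ := proto.GetExtension(req, pb.E_Priority)
	fmt.Println(*v.(*int32)) // 7; scalars come back as pointers in this API
}
```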

306
vendor/github.com/golang/protobuf/proto/properties.go generated vendored Normal file

@@ -0,0 +1,306 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoimpl"
)
// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
// Prop are the properties for each field.
//
// Fields belonging to a oneof are stored in OneofTypes instead, with a
// single Properties representing the parent oneof held here.
//
// The order of Prop matches the order of fields in the Go struct.
// Struct fields that are not related to protobufs have a "XXX_" prefix
// in the Properties.Name and must be ignored by the user.
Prop []*Properties
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the protobuf field name.
OneofTypes map[string]*OneofProperties
}
// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
// Name is a placeholder name with little meaningful semantic value.
// If the name has an "XXX_" prefix, the entire Properties must be ignored.
Name string
// OrigName is the protobuf field name or oneof name.
OrigName string
// JSONName is the JSON name for the protobuf field.
JSONName string
// Enum is a placeholder name for enums.
// For historical reasons, this is neither the Go name for the enum,
// nor the protobuf name for the enum.
Enum string // Deprecated: Do not use.
// Weak contains the full name of the weakly referenced message.
Weak string
// Wire is a string representation of the wire type.
Wire string
// WireType is the protobuf wire type for the field.
WireType int
// Tag is the protobuf field number.
Tag int
// Required reports whether this is a required field.
Required bool
// Optional reports whether this is an optional field.
Optional bool
// Repeated reports whether this is a repeated field.
Repeated bool
// Packed reports whether this is a packed repeated field of scalars.
Packed bool
// Proto3 reports whether this field operates under the proto3 syntax.
Proto3 bool
// Oneof reports whether this field belongs within a oneof.
Oneof bool
// Default is the default value in string form.
Default string
// HasDefault reports whether the field has a default value.
HasDefault bool
// MapKeyProp is the properties for the key field for a map field.
MapKeyProp *Properties
// MapValProp is the properties for the value field for a map field.
MapValProp *Properties
}
// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
// Type is a pointer to the generated wrapper type for the field value.
// This is nil for messages that are not in the open-struct API.
Type reflect.Type
// Field is the index into StructProperties.Prop for the containing oneof.
Field int
// Prop is the properties for the field.
Prop *Properties
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += "," + strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != "" {
s += ",json=" + p.JSONName
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if len(p.Weak) > 0 {
s += ",weak=" + p.Weak
}
if p.Proto3 {
s += ",proto3"
}
if p.Oneof {
s += ",oneof"
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(tag string) {
// For example: "bytes,49,opt,name=foo,def=hello!"
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
i = len(tag)
}
switch s := tag[:i]; {
case strings.HasPrefix(s, "name="):
p.OrigName = s[len("name="):]
case strings.HasPrefix(s, "json="):
p.JSONName = s[len("json="):]
case strings.HasPrefix(s, "enum="):
p.Enum = s[len("enum="):]
case strings.HasPrefix(s, "weak="):
p.Weak = s[len("weak="):]
case strings.Trim(s, "0123456789") == "":
n, _ := strconv.ParseUint(s, 10, 32)
p.Tag = int(n)
case s == "opt":
p.Optional = true
case s == "req":
p.Required = true
case s == "rep":
p.Repeated = true
case s == "varint" || s == "zigzag32" || s == "zigzag64":
p.Wire = s
p.WireType = WireVarint
case s == "fixed32":
p.Wire = s
p.WireType = WireFixed32
case s == "fixed64":
p.Wire = s
p.WireType = WireFixed64
case s == "bytes":
p.Wire = s
p.WireType = WireBytes
case s == "group":
p.Wire = s
p.WireType = WireStartGroup
case s == "packed":
p.Packed = true
case s == "proto3":
p.Proto3 = true
case s == "oneof":
p.Oneof = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
p.HasDefault = true
p.Default, i = tag[len("def="):], len(tag)
}
tag = strings.TrimPrefix(tag[i:], ",")
}
}
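// Illustrative sketch (not in the original file): parsing a hypothetical
// struct tag in the format emitted by protoc-gen-go populates the fields
// of Properties as follows.
//
//	var p Properties
//	p.Parse("bytes,49,opt,name=foo,def=hello!")
//	// p.Wire == "bytes", p.Tag == 49, p.Optional == true,
//	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"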
// Init populates the properties from a protocol buffer struct tag.
//
// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
if typ != nil && typ.Kind() == reflect.Map {
p.MapKeyProp = new(Properties)
p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
p.MapValProp = new(Properties)
p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
}
}
var propertiesCache sync.Map // map[reflect.Type]*StructProperties
// GetProperties returns the list of properties for the type represented by t,
// which must be a generated protocol buffer message in the open-struct API,
// where protobuf message fields are represented by exported Go struct fields.
//
// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
if p, ok := propertiesCache.Load(t); ok {
return p.(*StructProperties)
}
p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
return p.(*StructProperties)
}
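// Illustrative sketch (not in the original file): looking up the properties of
// a hypothetical generated message type pb.MyMessage in the open-struct API.
//
//	t := reflect.TypeOf(pb.MyMessage{})
//	sp := GetProperties(t)
//	for _, p := range sp.Prop {
//		fmt.Println(p.String()) // e.g. "varint,1,opt,name=id"
//	}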
func newProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
var hasOneof bool
prop := new(StructProperties)
// Construct a list of properties for each field in the struct.
for i := 0; i < t.NumField(); i++ {
p := new(Properties)
f := t.Field(i)
tagField := f.Tag.Get("protobuf")
p.Init(f.Type, f.Name, tagField, &f)
tagOneof := f.Tag.Get("protobuf_oneof")
if tagOneof != "" {
hasOneof = true
p.OrigName = tagOneof
}
// Rename unrelated struct fields with the "XXX_" prefix since so much
// user code simply checks for this to exclude special fields.
if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
p.Name = "XXX_" + p.Name
p.OrigName = "XXX_" + p.OrigName
} else if p.Weak != "" {
p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
}
prop.Prop = append(prop.Prop, p)
}
// Construct a mapping of oneof field names to properties.
if hasOneof {
var oneofWrappers []interface{}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
}
if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
}
if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
oneofWrappers = m.ProtoMessageInfo().OneofWrappers
}
}
prop.OneofTypes = make(map[string]*OneofProperties)
for _, wrapper := range oneofWrappers {
p := &OneofProperties{
Type: reflect.ValueOf(wrapper).Type(), // *T
Prop: new(Properties),
}
f := p.Type.Elem().Field(0)
p.Prop.Name = f.Name
p.Prop.Parse(f.Tag.Get("protobuf"))
// Determine the struct field that contains this oneof.
// Each wrapper is assignable to exactly one parent field.
var foundOneof bool
for i := 0; i < t.NumField() && !foundOneof; i++ {
if p.Type.AssignableTo(t.Field(i).Type) {
p.Field = i
foundOneof = true
}
}
if !foundOneof {
panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
}
prop.OneofTypes[p.Prop.OrigName] = p
}
}
return prop
}
func (sp *StructProperties) Len() int { return len(sp.Prop) }
func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int) { return }

167
vendor/github.com/golang/protobuf/proto/proto.go generated vendored Normal file
View File

@ -0,0 +1,167 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
"google.golang.org/protobuf/runtime/protoimpl"
)
const (
ProtoPackageIsVersion1 = true
ProtoPackageIsVersion2 = true
ProtoPackageIsVersion3 = true
ProtoPackageIsVersion4 = true
)
// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}
// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}
// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1
// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
return protoimpl.X.ProtoMessageV1Of(m)
}
// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
return protoimpl.X.ProtoMessageV2Of(m)
}
// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
return protoimpl.X.MessageOf(m)
}
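// Illustrative sketch (not in the original file): converting a hypothetical
// generated message between the v1 and v2 interfaces.
//
//	var m1 Message = new(pb.MyMessage) // v1 interface
//	m2 := MessageV2(m1)                // v2 interface over the same data
//	m1b := MessageV1(m2)               // back to the v1 interface
//	mr := MessageReflect(m1)           // protoreflect view
//	_, _ = m1b, mr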
// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
// Marshal formats the encoded bytes of the message.
// It should be deterministic and emit valid protobuf wire data.
// The caller takes ownership of the returned buffer.
Marshal() ([]byte, error)
}
// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
// Unmarshal parses the encoded bytes of the protobuf wire input.
// The provided buffer is only valid for the duration of the method call.
// It should not reset the receiver message.
Unmarshal([]byte) error
}
// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
// Merge merges the contents of src into the receiver message.
// It clones all data structures in src such that it aliases no mutable
// memory referenced by src.
Merge(src Message)
}
// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
err error
}
func (e *RequiredNotSetError) Error() string {
if e.err != nil {
return e.err.Error()
}
return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
return true
}
func checkRequiredNotSet(m protoV2.Message) error {
if err := protoV2.CheckInitialized(m); err != nil {
return &RequiredNotSetError{err: err}
}
return nil
}
// Clone returns a deep copy of src.
func Clone(src Message) Message {
return MessageV1(protoV2.Clone(MessageV2(src)))
}
// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
protoV2.Merge(MessageV2(dst), MessageV2(src))
}
// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown field values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
return protoV2.Equal(MessageV2(x), MessageV2(y))
}
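// Illustrative sketch (not in the original file): Clone, Merge, and Equal
// compose as expected for a hypothetical message value src.
//
//	dst := Clone(src)       // deep copy; Equal(dst, src) reports true
//	Merge(dst, src)         // repeated fields in dst now hold src's elements twice
//	same := Equal(dst, src) // false if src has any populated repeated field
//	_ = same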
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}

317
vendor/github.com/golang/protobuf/proto/registry.go generated vendored Normal file
View File

@ -0,0 +1,317 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"reflect"
"strings"
"sync"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte
var fileCache sync.Map // map[filePath]fileDescGZIP
// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
// Decompress the descriptor.
zr, err := gzip.NewReader(bytes.NewReader(d))
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
b, err := ioutil.ReadAll(zr)
if err != nil {
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
}
// Construct a protoreflect.FileDescriptor from the raw descriptor.
// Note that DescBuilder.Build automatically registers the constructed
// file descriptor with the v2 registry.
protoimpl.DescBuilder{RawDescriptor: b}.Build()
// Locally cache the raw descriptor form for the file.
fileCache.Store(s, d)
}
// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
if v, ok := fileCache.Load(s); ok {
return v.(fileDescGZIP)
}
// Find the descriptor in the v2 registry.
var b []byte
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
}
// Locally cache the raw descriptor form for the file.
if len(b) > 0 {
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
return v.(fileDescGZIP)
}
return nil
}
// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package in which
// the enum is declared, followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32
// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string
var enumCache sync.Map // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int
// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
if _, ok := enumCache.Load(s); ok {
panic("proto: duplicate enum registered: " + s)
}
enumCache.Store(s, m)
// This does not forward registration to the v2 registry since this API
// lacks sufficient information to construct a complete v2 enum descriptor.
}
// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
// Check whether the cache is stale. If the number of files in the current
// package differs, then it means that some enums may have been recently
// registered upstream that we do not know about.
var protoPkg protoreflect.FullName
if i := strings.LastIndexByte(s, '.'); i >= 0 {
protoPkg = protoreflect.FullName(s[:i])
}
v, _ := numFilesCache.Load(protoPkg)
numFiles, _ := v.(int)
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
return nil // cache is up-to-date; was not found earlier
}
// Update the enum cache for all enums declared in the given proto package.
numFiles = 0
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
name := protoimpl.X.LegacyEnumName(ed)
if _, ok := enumCache.Load(name); !ok {
m := make(enumsByName)
evs := ed.Values()
for i := evs.Len() - 1; i >= 0; i-- {
ev := evs.Get(i)
m[string(ev.Name())] = int32(ev.Number())
}
enumCache.LoadOrStore(name, m)
}
})
numFiles++
return true
})
numFilesCache.Store(protoPkg, numFiles)
// Check cache again for enum map.
if v, ok := enumCache.Load(s); ok {
return v.(enumsByName)
}
return nil
}
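// Illustrative sketch (not in the original file): resolving an enum number by
// value name, using the hypothetical enum name from the example above.
//
//	if m := EnumValueMap("my.proto.package.GoMessage_GoEnum"); m != nil {
//		n := m["GO_ENUM_VALUE"] // hypothetical enum value name
//		_ = n
//	}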
// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
Enums() protoreflect.EnumDescriptors
Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
eds := d.Enums()
for i := eds.Len() - 1; i >= 0; i-- {
f(eds.Get(i))
}
mds := d.Messages()
for i := mds.Len() - 1; i >= 0; i-- {
walkEnums(mds.Get(i), f)
}
}
// messageName is the full name of a protobuf message.
type messageName = string
var messageTypeCache sync.Map // map[messageName]reflect.Type
// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
panic(err)
}
messageTypeCache.Store(s, reflect.TypeOf(m))
}
// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
t := reflect.TypeOf(m)
if t.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid map kind: %v", t))
}
if _, ok := messageTypeCache.Load(s); ok {
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
}
messageTypeCache.Store(s, t)
}
// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
if v, ok := messageTypeCache.Load(s); ok {
return v.(reflect.Type)
}
// Derive the message type from the v2 registry.
var t reflect.Type
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
t = messageGoType(mt)
}
// If we could not get a concrete type, it is possible that it is a
// pseudo-message for a map entry.
if t == nil {
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
kt := goTypeForField(md.Fields().ByNumber(1))
vt := goTypeForField(md.Fields().ByNumber(2))
t = reflect.MapOf(kt, vt)
}
}
// Locally cache the message type for the given name.
if t != nil {
v, _ := messageTypeCache.LoadOrStore(s, t)
return v.(reflect.Type)
}
return nil
}
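// Illustrative sketch (not in the original file): deriving the Go type for a
// registered message name; the result is nil if the message is not linked in.
//
//	t := MessageType("google.protobuf.Timestamp")
//	if t != nil {
//		fmt.Println(t) // e.g. *timestamppb.Timestamp
//	}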
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
switch k := fd.Kind(); k {
case protoreflect.EnumKind:
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
return enumGoType(et)
}
return reflect.TypeOf(protoreflect.EnumNumber(0))
case protoreflect.MessageKind, protoreflect.GroupKind:
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
return messageGoType(mt)
}
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
default:
return reflect.TypeOf(fd.Default().Interface())
}
}
func enumGoType(et protoreflect.EnumType) reflect.Type {
return reflect.TypeOf(et.New(0))
}
func messageGoType(mt protoreflect.MessageType) reflect.Type {
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}
// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
if m == nil {
return ""
}
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
return m.XXX_MessageName()
}
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}
// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
panic(err)
}
}
type extensionsByNumber = map[int32]*ExtensionDesc
var extensionCache sync.Map // map[messageName]extensionsByNumber
// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
// Check whether the cache is stale. If the number of extensions for
// the given message differs, then it means that some extensions were
// recently registered upstream that we do not know about.
s := MessageName(m)
v, _ := extensionCache.Load(s)
xs, _ := v.(extensionsByNumber)
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
return xs // cache is up-to-date
}
// Cache is stale, re-compute the extensions map.
xs = make(extensionsByNumber)
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
if xd, ok := xt.(*ExtensionDesc); ok {
xs[int32(xt.TypeDescriptor().Number())] = xd
} else {
// TODO: This implies that the protoreflect.ExtensionType is a
// custom type not generated by protoc-gen-go. We could try and
// convert the type to an ExtensionDesc.
}
return true
})
extensionCache.Store(s, xs)
return xs
}
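// Illustrative sketch (not in the original file): enumerating the registered
// extensions of a message value m by field number.
//
//	for num, xd := range RegisteredExtensions(m) {
//		fmt.Printf("extension %d: %s\n", num, xd.Name)
//	}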

801
vendor/github.com/golang/protobuf/proto/text_decode.go generated vendored Normal file
View File

@ -0,0 +1,801 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/encoding/prototext"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextUnmarshalV2 = false
// ParseError is returned by UnmarshalText.
type ParseError struct {
Message string
// Deprecated: Do not use.
Line, Offset int
}
func (e *ParseError) Error() string {
if wrapTextUnmarshalV2 {
return e.Message
}
if e.Line == 1 {
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
}
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
}
// UnmarshalText parses a proto text formatted string into m.
func UnmarshalText(s string, m Message) error {
if u, ok := m.(encoding.TextUnmarshaler); ok {
return u.UnmarshalText([]byte(s))
}
m.Reset()
mi := MessageV2(m)
if wrapTextUnmarshalV2 {
err := prototext.UnmarshalOptions{
AllowPartial: true,
}.Unmarshal([]byte(s), mi)
if err != nil {
return &ParseError{Message: err.Error()}
}
return checkRequiredNotSet(mi)
} else {
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
return err
}
return checkRequiredNotSet(mi)
}
}
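// Illustrative sketch (not in the original file): parsing text format into a
// hypothetical message with a string field "name" and an int32 field "id".
//
//	m := new(pb.MyMessage)
//	if err := UnmarshalText(`name: "alice" id: 7`, m); err == nil {
//		// m.GetName() == "alice", m.GetId() == 7
//	}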
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
md := m.Descriptor()
fds := md.Fields()
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
seen := make(map[protoreflect.FieldNumber]bool)
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := protoreflect.Name(tok.value)
fd := fds.ByName(name)
switch {
case fd == nil:
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
fd = gd
}
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
fd = nil
case fd.IsWeak() && fd.Message().IsPlaceholder():
fd = nil
}
if fd == nil {
typeName := string(md.FullName())
if m, ok := m.Interface().(Message); ok {
t := reflect.TypeOf(m)
if t.Kind() == reflect.Ptr {
typeName = t.Elem().String()
}
}
return p.errorf("unknown field name %q in %v", name, typeName)
}
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
}
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
return p.errorf("non-repeated field %q was repeated", fd.Name())
}
seen[fd.Number()] = true
// Consume any colon.
if err := p.checkForColon(fd); err != nil {
return err
}
// Parse into the field.
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
if v, err = p.unmarshalValue(v, fd); err != nil {
return err
}
m.Set(fd, v)
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
return nil
}
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
name, err := p.consumeExtensionOrAnyName()
if err != nil {
return err
}
// If it contains a slash, it's an Any type URL.
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
tok := p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
if err != nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
}
m2 := mt.New()
if err := p.unmarshalMessage(m2, terminator); err != nil {
return err
}
b, err := protoV2.Marshal(m2.Interface())
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
}
urlFD := m.Descriptor().Fields().ByName("type_url")
valFD := m.Descriptor().Fields().ByName("value")
if seen[urlFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
}
if seen[valFD.Number()] {
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
}
m.Set(urlFD, protoreflect.ValueOfString(name))
m.Set(valFD, protoreflect.ValueOfBytes(b))
seen[urlFD.Number()] = true
seen[valFD.Number()] = true
return nil
}
xname := protoreflect.FullName(name)
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(m.Descriptor()) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
return p.errorf("unrecognized extension %q", name)
}
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
}
if err := p.checkForColon(fd); err != nil {
return err
}
v := m.Get(fd)
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
v = m.Mutable(fd)
}
v, err = p.unmarshalValue(v, fd)
if err != nil {
return err
}
m.Set(fd, v)
return p.consumeOptionalSeparator()
}
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch {
case fd.IsList():
lv := v.List()
var err error
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return v, nil
}
// One value of the repeated field.
p.back()
vv := lv.NewElement()
vv, err = p.unmarshalSingularValue(vv, fd)
if err != nil {
return v, err
}
lv.Append(vv)
return v, nil
case fd.IsMap():
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order.
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
keyFD := fd.MapKey()
valFD := fd.MapValue()
mv := v.Map()
kv := keyFD.Default()
vv := mv.NewValue()
for {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == terminator {
break
}
var err error
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return v, err
}
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
case "value":
if err := p.checkForColon(valFD); err != nil {
return v, err
}
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
return v, err
}
if err := p.consumeOptionalSeparator(); err != nil {
return v, err
}
default:
p.back()
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
mv.Set(kv.MapKey(), vv)
return v, nil
default:
p.back()
return p.unmarshalSingularValue(v, fd)
}
}
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
tok := p.next()
if tok.err != nil {
return v, tok.err
}
if tok.value == "" {
return v, p.errorf("unexpected EOF")
}
switch fd.Kind() {
case protoreflect.BoolKind:
switch tok.value {
case "true", "1", "t", "True":
return protoreflect.ValueOfBool(true), nil
case "false", "0", "f", "False":
return protoreflect.ValueOfBool(false), nil
}
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
}
}
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(x)), nil
}
// The C++ parser accepts large positive hex numbers that use
// two's complement arithmetic to represent negative numbers.
// This feature is here for backwards compatibility with C++.
if strings.HasPrefix(tok.value, "0x") {
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
}
}
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfUint32(uint32(x)), nil
}
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
return protoreflect.ValueOfUint64(uint64(x)), nil
}
case protoreflect.FloatKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 32); err == nil {
return protoreflect.ValueOfFloat32(float32(x)), nil
}
case protoreflect.DoubleKind:
// Ignore 'f' for compatibility with output generated by C++,
// but don't remove 'f' when the value is "-inf" or "inf".
v := tok.value
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
v = v[:len(v)-len("f")]
}
if x, err := strconv.ParseFloat(v, 64); err == nil {
return protoreflect.ValueOfFloat64(float64(x)), nil
}
case protoreflect.StringKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfString(tok.unquoted), nil
}
case protoreflect.BytesKind:
if isQuote(tok.value[0]) {
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
}
case protoreflect.EnumKind:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
}
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
if vd != nil {
return protoreflect.ValueOfEnum(vd.Number()), nil
}
case protoreflect.MessageKind, protoreflect.GroupKind:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return v, p.errorf("expected '{' or '<', found %q", tok.value)
}
err := p.unmarshalMessage(v.Message(), terminator)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
if fd.Message() == nil {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If the extension name or type URL is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in unmarshalMessage to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
var errBadUTF8 = errors.New("proto: bad UTF-8")
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
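// Illustrative examples (not in the original file) of how unescape interprets
// the input that follows a backslash:
//
//	unescape("n")     // yields "\n"
//	unescape("101")   // octal: yields "A" (0101 == 65)
//	unescape("x41")   // hex: yields "A"
//	unescape("u00e9") // yields "é" (U+00E9)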
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}

560
vendor/github.com/golang/protobuf/proto/text_encode.go generated vendored Normal file
View File

@ -0,0 +1,560 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
"bytes"
"encoding"
"fmt"
"io"
"math"
"sort"
"strings"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapTextMarshalV2 = false
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line)
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes the proto text format of m to w.
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
b, err := tm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// Text returns a proto text formatted string of m.
func (tm *TextMarshaler) Text(m Message) string {
b, _ := tm.marshal(m)
return string(b)
}
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
mr := MessageReflect(m)
if mr == nil || !mr.IsValid() {
return []byte("<nil>"), nil
}
if wrapTextMarshalV2 {
if m, ok := m.(encoding.TextMarshaler); ok {
return m.MarshalText()
}
opts := prototext.MarshalOptions{
AllowPartial: true,
EmitUnknown: true,
}
if !tm.Compact {
opts.Indent = " "
}
if !tm.ExpandAny {
opts.Resolver = (*protoregistry.Types)(nil)
}
return opts.Marshal(mr.Interface())
} else {
w := &textWriter{
compact: tm.Compact,
expandAny: tm.ExpandAny,
complete: true,
}
if m, ok := m.(encoding.TextMarshaler); ok {
b, err := m.MarshalText()
if err != nil {
return nil, err
}
w.Write(b)
return w.buf, nil
}
err := w.writeMessage(mr)
return w.buf, err
}
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
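// Illustrative sketch (not in the original file): for a hypothetical message m
// with fields name and id set, the default and compact forms differ only in
// whitespace.
//
//	MarshalTextString(m) // "name: \"alice\"\nid: 7\n"
//	CompactTextString(m) // "name:\"alice\" id:7 "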
var (
newline = []byte("\n")
endBraceNewline = []byte("}\n")
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
compact bool // same as TextMarshaler.Compact
expandAny bool // same as TextMarshaler.ExpandAny
complete bool // whether the current position is a complete line
indent int // indentation level; never negative
buf []byte
}
func (w *textWriter) Write(p []byte) (n int, _ error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, p...)
w.complete = false
return len(p), nil
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
w.buf = append(w.buf, ' ')
n++
}
w.buf = append(w.buf, frag...)
n += len(frag)
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
w.buf = append(w.buf, frag...)
n += len(frag)
if i+1 < len(frags) {
w.buf = append(w.buf, '\n')
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
w.buf = append(w.buf, c)
w.complete = c == '\n'
return nil
}
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
if fd.Kind() != protoreflect.GroupKind {
w.buf = append(w.buf, fd.Name()...)
w.WriteByte(':')
} else {
// Use message type name for group field name.
w.buf = append(w.buf, fd.Message().Name()...)
}
if !w.compact {
w.WriteByte(' ')
}
}
func requiresQuotes(u string) bool {
// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
md := m.Descriptor()
fdURL := md.Fields().ByName("type_url")
fdVal := md.Fields().ByName("value")
url := m.Get(fdURL).String()
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
if err != nil {
return false, nil
}
b := m.Get(fdVal).Bytes()
m2 := mt.New()
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
return false, nil
}
w.Write([]byte("["))
if requiresQuotes(url) {
w.writeQuotedString(url)
} else {
w.Write([]byte(url))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.indent++
}
if err := w.writeMessage(m2); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.indent--
w.Write([]byte(">\n"))
}
return true, nil
}
func (w *textWriter) writeMessage(m protoreflect.Message) error {
md := m.Descriptor()
if w.expandAny && md.FullName() == "google.protobuf.Any" {
if canExpand, err := w.writeProto3Any(m); canExpand {
return err
}
}
fds := md.Fields()
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
} else {
i++
}
if fd == nil || !m.Has(fd) {
continue
}
switch {
case fd.IsList():
lv := m.Get(fd).List()
for j := 0; j < lv.Len(); j++ {
w.writeName(fd)
v := lv.Get(j)
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
}
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := m.Get(fd).Map()
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
for _, entry := range entries {
w.writeName(fd)
w.WriteByte('<')
if !w.compact {
w.WriteByte('\n')
}
w.indent++
w.writeName(kfd)
if err := w.writeSingularValue(entry.key, kfd); err != nil {
return err
}
w.WriteByte('\n')
w.writeName(vfd)
if err := w.writeSingularValue(entry.val, vfd); err != nil {
return err
}
w.WriteByte('\n')
w.indent--
w.WriteByte('>')
w.WriteByte('\n')
}
default:
w.writeName(fd)
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
return err
}
w.WriteByte('\n')
}
}
if b := m.GetUnknown(); len(b) > 0 {
w.writeUnknownFields(b)
}
return w.writeExtensions(m)
}
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch fd.Kind() {
case protoreflect.FloatKind, protoreflect.DoubleKind:
switch vf := v.Float(); {
case math.IsInf(vf, +1):
w.Write(posInf)
case math.IsInf(vf, -1):
w.Write(negInf)
case math.IsNaN(vf):
w.Write(nan)
default:
fmt.Fprint(w, v.Interface())
}
case protoreflect.StringKind:
// NOTE: This does not validate UTF-8 for historical reasons.
w.writeQuotedString(string(v.String()))
case protoreflect.BytesKind:
w.writeQuotedString(string(v.Bytes()))
case protoreflect.MessageKind, protoreflect.GroupKind:
var bra, ket byte = '<', '>'
if fd.Kind() == protoreflect.GroupKind {
bra, ket = '{', '}'
}
w.WriteByte(bra)
if !w.compact {
w.WriteByte('\n')
}
w.indent++
m := v.Message()
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
b, err := m2.MarshalText()
if err != nil {
return err
}
w.Write(b)
} else {
w.writeMessage(m)
}
w.indent--
w.WriteByte(ket)
case protoreflect.EnumKind:
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
fmt.Fprint(w, ev.Name())
} else {
fmt.Fprint(w, v.Enum())
}
default:
fmt.Fprint(w, v.Interface())
}
return nil
}
// writeQuotedString writes a quoted string in the protocol buffer text format.
func (w *textWriter) writeQuotedString(s string) {
w.WriteByte('"')
for i := 0; i < len(s); i++ {
switch c := s[i]; c {
case '\n':
w.buf = append(w.buf, `\n`...)
case '\r':
w.buf = append(w.buf, `\r`...)
case '\t':
w.buf = append(w.buf, `\t`...)
case '"':
w.buf = append(w.buf, `\"`...)
case '\\':
w.buf = append(w.buf, `\\`...)
default:
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
w.buf = append(w.buf, c)
} else {
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
}
}
}
w.WriteByte('"')
}
func (w *textWriter) writeUnknownFields(b []byte) {
if !w.compact {
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
}
for len(b) > 0 {
num, wtyp, n := protowire.ConsumeTag(b)
if n < 0 {
return
}
b = b[n:]
if wtyp == protowire.EndGroupType {
w.indent--
w.Write(endBraceNewline)
continue
}
fmt.Fprint(w, num)
if wtyp != protowire.StartGroupType {
w.WriteByte(':')
}
if !w.compact || wtyp == protowire.StartGroupType {
w.WriteByte(' ')
}
switch wtyp {
case protowire.VarintType:
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed32Type:
v, n := protowire.ConsumeFixed32(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.Fixed64Type:
v, n := protowire.ConsumeFixed64(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprint(w, v)
case protowire.BytesType:
v, n := protowire.ConsumeBytes(b)
if n < 0 {
return
}
b = b[n:]
fmt.Fprintf(w, "%q", v)
case protowire.StartGroupType:
w.WriteByte('{')
w.indent++
default:
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
}
w.WriteByte('\n')
}
}
// writeExtensions writes all the extensions in m.
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
md := m.Descriptor()
if md.ExtensionRanges().Len() == 0 {
return nil
}
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
// For message set, use the name of the message as the extension name.
name := string(ext.desc.FullName())
if isMessageSet(ext.desc.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
if !ext.desc.IsList() {
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
return err
}
} else {
lv := ext.val.List()
for i := 0; i < lv.Len(); i++ {
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
return err
}
}
}
}
return nil
}
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
fmt.Fprintf(w, "[%s]:", name)
if !w.compact {
w.WriteByte(' ')
}
if err := w.writeSingularValue(v, fd); err != nil {
return err
}
w.WriteByte('\n')
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
for i := 0; i < w.indent*2; i++ {
w.buf = append(w.buf, ' ')
}
w.complete = false
}

78
vendor/github.com/golang/protobuf/proto/wire.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
import (
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/runtime/protoiface"
)
// Size returns the size in bytes of the wire-format encoding of m.
func Size(m Message) int {
if m == nil {
return 0
}
mi := MessageV2(m)
return protoV2.Size(mi)
}
// Marshal returns the wire-format encoding of m.
func Marshal(m Message) ([]byte, error) {
b, err := marshalAppend(nil, m, false)
if b == nil {
b = zeroBytes
}
return b, err
}
var zeroBytes = make([]byte, 0, 0)
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
if m == nil {
return nil, ErrNil
}
mi := MessageV2(m)
nbuf, err := protoV2.MarshalOptions{
Deterministic: deterministic,
AllowPartial: true,
}.MarshalAppend(buf, mi)
if err != nil {
return buf, err
}
if len(buf) == len(nbuf) {
if !mi.ProtoReflect().IsValid() {
return buf, ErrNil
}
}
return nbuf, checkRequiredNotSet(mi)
}
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
m.Reset()
return UnmarshalMerge(b, m)
}
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
func UnmarshalMerge(b []byte, m Message) error {
mi := MessageV2(m)
out, err := protoV2.UnmarshalOptions{
AllowPartial: true,
Merge: true,
}.UnmarshalState(protoiface.UnmarshalInput{
Buf: b,
Message: mi.ProtoReflect(),
})
if err != nil {
return err
}
if out.Flags&protoiface.UnmarshalInitialized > 0 {
return nil
}
return checkRequiredNotSet(mi)
}
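// Illustrative sketch (not in the original file): a wire-format round trip for
// a hypothetical message value src.
//
//	b, err := Marshal(src)
//	if err != nil {
//		// handle error
//	}
//	dst := new(pb.MyMessage)
//	if err := Unmarshal(b, dst); err == nil {
//		// Equal(src, dst) reports true
//	}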

34
vendor/github.com/golang/protobuf/proto/wrappers.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package proto
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { return &v }
// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }
// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { return &v }
// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { return &v }
// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { return &v }
// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { return &v }
// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { return &v }
// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { return &v }
// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { return &v }
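These helpers exist because proto2-style optional scalar fields are generated as pointer types. A small sketch, assuming the proto import above and a hypothetical generated examplepb.Settings message with pointer-typed fields:

func newSettings() *examplepb.Settings {
	return &examplepb.Settings{
		Enabled: proto.Bool(true),        // *bool
		Retries: proto.Int32(3),          // *int32
		Name:    proto.String("default"), // *string
	}
}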

64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
package timestamp
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/timestamp.proto.
type Timestamp = timestamppb.Timestamp
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
}

201
vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

1
vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE generated vendored Normal file
View File

@ -0,0 +1 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)

1
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
cover.dat

7
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile.TRAVIS generated vendored Normal file
View File

@ -0,0 +1,7 @@
all:
cover:
go test -cover -v -coverprofile=cover.dat ./...
go tool cover -func cover.dat
.PHONY: cover

75
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go generated vendored Normal file
View File

@ -0,0 +1,75 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"github.com/golang/protobuf/proto"
)
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
var headerBuf [binary.MaxVarintLen32]byte
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil), but if it does,
// it should be treated as no-op (according to the
// Reader contract). So let's go on...
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}

16
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go generated vendored Normal file
View File

@ -0,0 +1,16 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil

46
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"github.com/golang/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}
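A round-trip sketch combining WriteDelimited with ReadDelimited from decode.go above. The examplepb package and its Msg type are hypothetical stand-ins for any generated message type:

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"

	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	var buf bytes.Buffer
	// Write two length-delimited records to a single stream.
	for _, name := range []string{"a", "b"} {
		if _, err := pbutil.WriteDelimited(&buf, &examplepb.Msg{Name: name}); err != nil {
			log.Fatal(err)
		}
	}
	// Read records back until the stream is exhausted.
	for {
		var m examplepb.Msg
		if _, err := pbutil.ReadDelimited(&buf, &m); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(m.Name)
	}
}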

3
vendor/github.com/mjl-/bstore/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
/cover.out
/cover.html
/testdata/*.db

7
vendor/github.com/mjl-/bstore/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2022 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

20
vendor/github.com/mjl-/bstore/Makefile generated vendored Normal file
View File

@ -0,0 +1,20 @@
build:
go build ./...
go vet ./...
GOARCH=386 go vet ./...
staticcheck ./...
./gendoc.sh
fmt:
go fmt ./...
gofmt -w -s *.go cmd/bstore/*.go
test:
go test -race -shuffle=on -coverprofile cover.out
go tool cover -html=cover.out -o cover.html
benchmark:
go test -bench .
fuzz:
go test -fuzz .

51
vendor/github.com/mjl-/bstore/README.md generated vendored Normal file
View File

@ -0,0 +1,51 @@
bstore is a database library for storing and querying Go struct data.
See https://pkg.go.dev/github.com/mjl-/bstore
MIT-licensed
# Comparison
Bstore is designed as a small, pure Go library that still provides most of the
common data consistency requirements for modest database use cases. Bstore aims
to make basic use of cgo-based libraries, such as sqlite, unnecessary. Sqlite
is a great library, but Go applications that require cgo are hard to
cross-compile. With bstore, cross-compiling to most Go-supported platforms
stays trivial. Although bstore is much more limited than sqlite in many
aspects, it also offers some advantages.
- Cross-compilation and reproducibility: Trivial with bstore due to pure Go,
much harder with sqlite because of cgo.
- Code complexity: low with bstore (6k lines including comments/docs), high
with sqlite.
- Query language: mostly-type-checked function calls in bstore, free-form query
strings only checked at runtime with sqlite.
- Functionality: very limited with bstore, much more full-featured with sqlite.
- Schema management: mostly automatic based on Go type definitions in bstore,
manual with ALTER statements in sqlite.
- Types and packing/parsing: automatic/transparent in bstore based on Go types
(including maps, slices, structs and custom MarshalBinary encoding), versus
manual scanning and parameter passing with sqlite with limited set of SQL
types.
- Performance: low to good performance with bstore, high performance with
sqlite.
- Database files: single file with bstore, several files with sqlite (due to
WAL or journal files).
- Test coverage: decent but limited real-world use for bstore, versus sqlite's
extremely thorough testing and enormous real-world use.
# FAQ
Q: Is bstore an ORM?
A: No. The API for bstore may look like an ORM. But instead of mapping bstore
"queries" (function calls) to an SQL query string, bstore executes them
directly without converting to a query language.
Q: How does bstore store its data?
A: A bstore database is a single-file BoltDB database. BoltDB provides ACID
properties. Bstore uses a BoltDB "bucket" (key/value store) for each Go type
stored, with multiple subbuckets: one for type definitions, one for the actual
data, and one bucket per index. BoltDB stores data in a B+tree. See format.md
for details.
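A minimal sketch of typical use, assuming the Open/Insert/QueryDB signatures of this vendored version; the User type and database path are illustrative:

package main

import (
	"log"

	"github.com/mjl-/bstore"
)

type User struct {
	ID   int64  // First field is the primary key; 0 gets an autoincremented value.
	Name string `bstore:"unique"`
}

func main() {
	db, err := bstore.Open("app.db", nil, User{})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	u := User{Name: "alice"}
	if err := db.Insert(&u); err != nil {
		log.Fatal(err)
	}

	// Queries are function calls, not query strings.
	x, err := bstore.QueryDB[User](db).FilterNonzero(User{Name: "alice"}).Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("user %d: %s", x.ID, x.Name)
}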

80
vendor/github.com/mjl-/bstore/default.go generated vendored Normal file
View File

@ -0,0 +1,80 @@
package bstore
import (
"fmt"
"reflect"
"time"
)
var zerotime = time.Time{}
// applyDefault replaces zero values for fields that have a Default value configured.
func (tv *typeVersion) applyDefault(rv reflect.Value) error {
for _, f := range tv.Fields[1:] {
fv := rv.FieldByIndex(f.structField.Index)
if err := f.applyDefault(fv); err != nil {
return err
}
}
return nil
}
func (f field) applyDefault(rv reflect.Value) error {
switch f.Type.Kind {
case kindBytes, kindBinaryMarshal, kindMap:
return nil
case kindSlice, kindStruct:
return f.Type.applyDefault(rv)
case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindInt64, kindUint, kindUint8, kindUint16, kindUint32, kindUint64, kindFloat32, kindFloat64, kindString, kindTime:
if !f.defaultValue.IsValid() || !rv.IsZero() {
return nil
}
fv := f.defaultValue
// Time is special. "now" is encoded as the zero value of time.Time.
if f.Type.Kind == kindTime && fv.Interface() == zerotime {
now := time.Now().Round(0)
if f.Type.Ptr {
fv = reflect.ValueOf(&now)
} else {
fv = reflect.ValueOf(now)
}
} else if f.Type.Ptr {
fv = reflect.New(f.structField.Type.Elem())
fv.Elem().Set(f.defaultValue)
}
rv.Set(fv)
return nil
default:
return fmt.Errorf("internal error: missing case for %v", f.Type.Kind)
}
}
// only for recursing. we do not support recursing into maps because it would
involve more work making values settable. and how sensible is it anyway?
func (ft fieldType) applyDefault(rv reflect.Value) error {
if ft.Ptr && (rv.IsZero() || rv.IsNil()) {
return nil
} else if ft.Ptr {
rv = rv.Elem()
}
switch ft.Kind {
case kindSlice:
n := rv.Len()
for i := 0; i < n; i++ {
if err := ft.List.applyDefault(rv.Index(i)); err != nil {
return err
}
}
case kindStruct:
for _, nf := range ft.Fields {
nfv := rv.FieldByIndex(nf.structField.Index)
if err := nf.applyDefault(nfv); err != nil {
return err
}
}
}
return nil
}

142
vendor/github.com/mjl-/bstore/doc.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
/*
Package bstore is a database library for storing and querying Go struct data.
Bstore is designed as a small, pure Go library that still provides most of
the common data consistency requirements for modest database use cases. Bstore
aims to make basic use of cgo-based libraries, such as sqlite, unnecessary.
Bstore implements autoincrementing primary keys, indices, default values,
enforcement of nonzero, unique and referential integrity constraints, automatic
schema updates and a query API for combining filters/sorting/limits. Queries
are planned and executed using indices for fast execution where possible.
Bstore is designed with the Go type system in mind: you typically don't have to
write any (un)marshal code for your types.
# Field types
Struct field types currently supported for storing, including pointers to these
types, but not pointers to pointers:
- int (as int32), int8, int16, int32, int64
- uint (as uint32), uint8, uint16, uint32, uint64
- bool, float32, float64, string, []byte
- Maps, with keys and values of any supported type, except keys with pointer types.
- Slices, with elements of any supported type.
- time.Time
- Types that implement encoding.BinaryMarshaler and encoding.BinaryUnmarshaler,
useful for struct types with state in private fields. Do not change the
MarshalBinary/UnmarshalBinary encoding in an incompatible way without a data
migration.
- Structs, with fields of any supported type.
Note: int and uint are stored as int32 and uint32, for compatibility of database
files between 32bit and 64bit systems. Where possible, use explicit (u)int32 or
(u)int64 types.
Embedded structs are handled by storing the individual fields of the embedded
struct. The named embedded type is not part of the type schema, and can
currently only be used with UpdateField and UpdateFields, not for filtering.
Bstore embraces the use of Go zero values. Use zero values, possibly pointers,
where you would use NULL values in SQL.
Types that have not yet been implemented: interface values, (fixed length) arrays,
complex numbers.
# Struct tags
The typical Go struct can be stored in the database. The first field of a
struct type is its primary key, and must always be unique. Additional behaviour
can be configured through struct tag "bstore". The values are comma-separated.
Typically one word, but some have multiple space-separated words:
- "-" ignores the field entirely.
- "name <fieldname>", use "fieldname" instead of the Go type field name.
- "nonzero", enforces that field values are not the zero value.
- "noauto", only valid for integer types, and only for the primary key. By
default, an integer-typed primary key will automatically get a next value
assigned on insert when it is 0. With noauto inserting a 0 value results in an
error. For primary keys of other types inserting the zero value always results
in an error.
- "index" or "index <field1+field2+...> [<name>]", adds an index. In the first
form, the index is on the field on which the tag is specified, and the index
name is the same as the field name. In the second form multiple fields can be
specified, and an optional name. The first field must be the field on which
the tag is specified. The field names are +-separated. The default name for
the second form is the same +-separated string, but it can be set explicitly
with the second parameter. An index can only be set for basic integer types, bools,
time and strings. Indices are automatically (re)created when registering a
type.
- "unique" or "unique <field1+field2+...> [<name>]", adds an index as with
"index" and also enforces a unique constraint. For time.Time the timezone is
ignored for the uniqueness check.
- "ref <type>", enforces that the value exists as primary key for "type".
Field types must match exactly, e.g. you cannot reference an int with an int64.
An index is automatically created and maintained for fields with a foreign key,
for efficiently checking that removed records in the referenced type are not in
use. If the field has the zero value, the reference is not checked. If you
require a valid reference, add "nonzero".
- "default <value>", replaces a zero value with the specified value on record
insert. Special value "now" is recognized for time.Time as the current time.
Times are parsed as time.RFC3339 otherwise. Supported types: bool
("true"/"false"), integers, floats, strings. Value is not quoted and no escaping
of special characters, like the comma that separates struct tag words, is
possible. Defaults are also replaced on fields in nested structs and
slices, but not in maps.
- "typename <name>", override name of the type. The name of the Go type is
used by default. Can only be present on the first field (primary key).
Useful for doing schema updates.
# Schema updates
Before using a Go type, you must register it for use with the open database by
passing a (zero) value of that type to the Open or Register functions. For each
type, a type definition is stored in the database. If a type has an updated
definition since the previous database open, a new type definition is added to
the database automatically and any required modifications are made: Indexes
(re)created, fields added/removed, new nonzero/unique/reference constraints
validated.
If data/types cannot be updated automatically (e.g. converting an int field into
a string field), custom data migration code is needed. You may have to keep
track of a data/schema version.
As a special case, you can switch field types between pointer and non-pointer
types. With one exception: changing from pointer to non-pointer where the type
has a field that must be nonzero is not allowed. The on-disk encoding will not
be changed; nil pointers turn into zero values, and zero values into nil
pointers. Also see section Limitations about pointer types.
Because named embed structs are not part of the type definition, you can
wrap/unwrap fields into an embedded/anonymous struct field. No new type definition
is created.
# BoltDB
BoltDB is used as underlying storage. Bolt provides ACID transactions, storing
its data in a B+tree. Only a single write transaction can be active at a time,
but otherwise multiple read-only transactions can be active. Do not start a
blocking read-only transaction while holding a writable transaction or vice
versa, this will cause deadlock.
Bolt uses Go types that are memory mapped to the database file. This means bolt
database files cannot be transferred between machines with different endianness.
Bolt uses explicit widths for its types, so files can be transferred between
32bit and 64bit machines of same endianness.
# Limitations
Bstore does not implement the equivalent of SQL joins, aggregates, and many
other concepts.
Filtering/comparing/sorting on pointer fields is not currently allowed. Pointer
fields cannot have a (unique) index due to the current index format. Using zero
values is recommended instead for now.
Integer field types can be expanded to wider types, but not to a different
signedness or a smaller integer (fewer bits). The primary key of a type cannot
currently be changed.
The first field of a struct is always the primary key. Types require an
explicit primary key. Autoincrement is only available for the primary key.
*/
package bstore
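An illustration of the struct tags described above; the types and field names are made up, and the time import is assumed:

type Group struct {
	ID   int64  // Primary key, autoincremented on insert when 0.
	Name string `bstore:"unique"`
}

type Member struct {
	ID      int64
	GroupID int64     `bstore:"nonzero,ref Group"` // Must reference an existing Group.
	Email   string    `bstore:"unique"`
	Joined  time.Time `bstore:"default now"` // Zero value replaced with the current time.
	Name    string    `bstore:"index Name+Email nameemail"` // Composite index with explicit name.
}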

91
vendor/github.com/mjl-/bstore/equal.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
package bstore
import (
"bytes"
"encoding"
"reflect"
"time"
)
// equal checks if ov and v are the same as far as storage is concerned. i.e.
// this only takes stored fields into account. reflect.DeepEqual cannot be used,
// it would take all fields into account, including unexported.
func (tv *typeVersion) equal(ov, v reflect.Value) (r bool) {
if !ov.IsValid() || !v.IsValid() {
return false
}
for _, f := range tv.Fields {
fov := ov.FieldByIndex(f.structField.Index)
fv := v.FieldByIndex(f.structField.Index)
if !f.Type.equal(fov, fv) {
return false
}
}
return true
}
func (ft fieldType) equal(ov, v reflect.Value) (r bool) {
if ov == v {
return true
} else if !ov.IsValid() || !v.IsValid() {
return false
}
if ft.Ptr {
ov = ov.Elem()
v = v.Elem()
}
if ov == v {
return true
} else if !ov.IsValid() || !v.IsValid() {
return false
}
switch ft.Kind {
case kindBytes:
return bytes.Equal(ov.Bytes(), v.Bytes())
case kindMap:
on := ov.Len()
n := v.Len()
if on != n {
return false
}
r := ov.MapRange()
for r.Next() {
vv := v.MapIndex(r.Key())
if !vv.IsValid() || !ft.MapValue.equal(r.Value(), vv) {
return false
}
}
return true
case kindSlice:
on := ov.Len()
n := v.Len()
if on != n {
return false
}
for i := 0; i < n; i++ {
if !ft.List.equal(ov.Index(i), v.Index(i)) {
return false
}
}
return true
case kindTime:
return ov.Interface().(time.Time).Equal(v.Interface().(time.Time))
case kindBinaryMarshal:
obuf, oerr := ov.Interface().(encoding.BinaryMarshaler).MarshalBinary()
buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
if oerr != nil || err != nil {
return false // todo: should propagate error?
}
return bytes.Equal(obuf, buf)
case kindStruct:
for _, f := range ft.Fields {
fov := ov.FieldByIndex(f.structField.Index)
fv := v.FieldByIndex(f.structField.Index)
if !f.Type.equal(fov, fv) {
return false
}
}
return true
}
return ov.Interface() == v.Interface()
}

568
vendor/github.com/mjl-/bstore/exec.go generated vendored Normal file
View File

@ -0,0 +1,568 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
"sort"
"time"
bolt "go.etcd.io/bbolt"
)
// exec represents the execution of a query plan.
type exec[T any] struct {
q *Query[T]
plan *plan[T]
// For queries with explicit PKs filtered on.
// See plan.keys. We remove items from the list as we look them up, but we keep the slice non-nil.
keys [][]byte
// If -1, no limit is set. This is different from Query where 0 means
// no limit. We count back and 0 means the end.
limit int
data []pair[T] // If not nil (even if empty), serve nextKey requests from here.
ib *bolt.Bucket
rb *bolt.Bucket
forward func() (bk, bv []byte) // Once we start scanning, we prepare forward to next/prev to the following value.
}
// exec creates a new execution for the plan, registering statistics.
func (p *plan[T]) exec(q *Query[T]) *exec[T] {
q.stats.Queries++
if p.idx == nil {
if p.keys != nil {
q.stats.PlanPK++
} else if p.start != nil || p.stop != nil {
q.stats.PlanPKScan++
} else {
q.stats.PlanTableScan++
}
q.stats.LastIndex = ""
} else {
if p.keys != nil {
q.stats.PlanUnique++
} else {
q.stats.PlanIndexScan++
}
q.stats.LastIndex = p.idx.Name
}
if len(p.orders) > 0 {
q.stats.Sort++
}
q.stats.LastOrdered = p.start != nil || p.stop != nil
q.stats.LastAsc = !p.desc
limit := -1
if q.xlimit > 0 {
limit = q.xlimit
}
return &exec[T]{q: q, plan: p, keys: p.keys, limit: limit}
}
// incr treats buf as a bigendian number, increasing it by one. used for reverse
// scans, where we must start beyond the key prefix we are looking for.
func incr(buf []byte) bool {
for i := len(buf) - 1; i >= 0; i-- {
if buf[i] < 255 {
buf[i]++
return true
}
buf[i] = 0
}
return false
}
func cutoff(b []byte, n int) []byte {
if len(b) <= n {
return b
}
return b[:n]
}
// nextKey returns the key and optionally value for the next selected record.
//
// ErrAbsent is returned if there is no more record.
//
// If an error occurs, an error is set on query, except in the case of
// ErrAbsent. ErrAbsent does not finish the query because a Delete or Update
// could follow.
func (e *exec[T]) nextKey(write, value bool) ([]byte, T, error) {
var zero T
q := e.q
if q.err != nil {
return nil, zero, q.err
}
// We collected & sorted data previously. Return from it until done.
// Limit was already applied.
if e.data != nil {
if len(e.data) == 0 {
return nil, zero, ErrAbsent
}
p := e.data[0]
e.data = e.data[1:]
var v T
if value {
var err error
v, err = p.Value(e)
if err != nil {
q.error(err)
return nil, zero, err
}
}
return p.bk, v, nil
}
if e.limit == 0 {
return nil, zero, ErrAbsent
}
// First time we are going to need buckets.
if e.rb == nil {
tx, err := q.tx(write)
if err != nil {
q.error(err)
return nil, zero, err
}
e.rb, err = tx.recordsBucket(q.st.Name, q.st.Current.fillPercent)
if err != nil {
return nil, zero, err
}
if e.plan.idx != nil {
e.ib, err = tx.indexBucket(e.plan.idx)
if err != nil {
return nil, zero, err
}
}
}
// List of IDs (records) or full unique index equality match.
// We can get the records/index value by a simple "get" on the key.
if e.keys != nil {
collect := len(e.plan.orders) > 0
if collect {
e.data = []pair[T]{} // Must be non-nil to get into e.data branch!
}
for i, xk := range e.keys {
var bk, bv []byte
// For indices, we need to look up the PK through the index.
if e.plan.idx != nil {
c := e.ib.Cursor()
q.stats.Index.Cursor++
bki, _ := c.Seek(xk)
if !bytes.HasPrefix(bki, xk) {
continue
}
// log.Printf("seek %x, bki %x", xk, bki)
bk = bki[len(xk):]
} else {
bk = xk
}
// We don't need to fetch the full record now if it isn't needed by the
// caller. It may be fetched below for further filtering.
if value || e.plan.idx == nil {
q.stats.Records.Get++
bv = e.rb.Get(bk)
if bv == nil {
if e.plan.idx != nil {
return nil, zero, fmt.Errorf("%w: record with pk %x referenced through index %q not found", ErrStore, bk, e.plan.idx.Name)
}
continue
}
}
p := pair[T]{bk, bv, nil}
if ok, err := e.checkFilter(&p); err != nil {
return nil, zero, err
} else if !ok {
continue
}
if collect {
e.data = append(e.data, p)
continue
}
// Again, only fetch value if needed.
var v T
if value {
var err error
v, err = p.Value(e)
if err != nil {
q.error(err)
return nil, zero, err
}
}
if e.limit > 0 {
e.limit--
}
e.keys = e.keys[i+1:]
return bk, v, nil
}
if !collect {
return nil, zero, ErrAbsent
}
// Restart, now with data.
e.keys = [][]byte{}
e.sort()
if e.limit > 0 && len(e.data) > e.limit {
e.data = e.data[:e.limit]
}
return e.nextKey(write, value)
}
// We are going to do a scan, either over the records or an index. We may have a start and stop key.
collect := len(e.plan.orders) > 0
if collect {
e.data = []pair[T]{} // Must be non-nil to get into e.data branch on function restart.
}
for {
var xk, xv []byte
if e.forward == nil {
// First time we are in this loop, we set up a cursor and e.forward.
var c *bolt.Cursor
var statsKV *StatsKV
if e.plan.idx == nil {
c = e.rb.Cursor()
statsKV = &q.stats.Records
} else {
c = e.ib.Cursor()
statsKV = &q.stats.Index
}
if !e.plan.desc {
e.forward = c.Next
if e.plan.start != nil {
statsKV.Cursor++
// If e.plan.start does not exist, seek will skip to the
// next value after. Fine because this is ascending order.
xk, xv = c.Seek(e.plan.start)
} else {
statsKV.Cursor++
xk, xv = c.First()
}
} else {
e.forward = c.Prev
if e.plan.start == nil {
statsKV.Cursor++
xk, xv = c.Last()
} else {
start := make([]byte, len(e.plan.start))
copy(start, e.plan.start)
ok := incr(start)
if !ok {
statsKV.Cursor++
// We were at the last representable value. So we simply start at the end.
xk, xv = c.Last()
} else {
statsKV.Cursor++
xk, xv = c.Seek(start)
if xk == nil {
statsKV.Cursor++
xk, xv = c.Last()
}
// We started at the value after where we were requested to start, so we have to
// move until we find a matching key.
// todo: we could take e.plan.stop into account (if set). right now we may be
// seeking all the way to the front without ever seeing a match to stop.
for xk != nil && bytes.Compare(cutoff(xk, len(e.plan.start)), e.plan.start) > 0 {
statsKV.Cursor++
xk, xv = e.forward()
}
}
}
}
} else {
if e.plan.idx == nil {
q.stats.Records.Cursor++
} else {
q.stats.Index.Cursor++
}
xk, xv = e.forward()
// log.Printf("forwarded, %x %x", xk, xv)
}
if xk == nil {
break
}
if e.plan.start != nil && !e.plan.startInclusive && bytes.HasPrefix(xk, e.plan.start) {
continue
}
if e.plan.stop != nil {
cmp := bytes.Compare(cutoff(xk, len(e.plan.stop)), e.plan.stop)
if !e.plan.desc && (e.plan.stopInclusive && cmp > 0 || !e.plan.stopInclusive && cmp >= 0) {
break
} else if e.plan.desc && (e.plan.stopInclusive && cmp < 0 || !e.plan.stopInclusive && cmp <= 0) {
break
}
}
var pk, bv []byte
if e.plan.idx == nil {
pk = xk
bv = xv
} else {
var err error
pk, _, err = e.plan.idx.parseKey(xk, false)
if err != nil {
q.error(err)
return nil, zero, err
}
}
p := pair[T]{pk, bv, nil}
if ok, err := e.checkFilter(&p); err != nil {
return nil, zero, err
} else if !ok {
continue
}
//log.Printf("have kv, %x %x", p.bk, p.bv)
var v T
var err error
if value {
v, err = p.Value(e)
if err != nil {
q.error(err)
return nil, zero, err
}
}
if collect {
e.data = append(e.data, p)
continue
}
if e.limit > 0 {
e.limit--
}
return p.bk, v, nil
}
if !collect {
return nil, zero, ErrAbsent
}
// Restart, now with data.
e.sort()
if e.limit > 0 && len(e.data) > e.limit {
e.data = e.data[:e.limit]
}
return e.nextKey(write, value)
}
// checkFilter checks against the filters for the plan.
func (e *exec[T]) checkFilter(p *pair[T]) (rok bool, rerr error) {
q := e.q
for _, ff := range e.plan.filters {
switch f := ff.(type) {
// note: filterIDs is not here, it is handled earlier to fetch records.
case filterFn[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
if !f.fn(v) {
return
}
case filterEqual[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
rv := reflect.ValueOf(v)
frv := rv.FieldByIndex(f.field.structField.Index)
if !f.field.Type.equal(frv, f.rvalue) {
return
}
case filterNotEqual[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
rv := reflect.ValueOf(v)
frv := rv.FieldByIndex(f.field.structField.Index)
if f.field.Type.equal(frv, f.rvalue) {
return
}
case filterIn[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
rv := reflect.ValueOf(v)
frv := rv.FieldByIndex(f.field.structField.Index)
var have bool
for _, xrv := range f.rvalues {
if f.field.Type.equal(frv, xrv) {
have = true
break
}
}
if !have {
return
}
case filterNotIn[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
rv := reflect.ValueOf(v)
frv := rv.FieldByIndex(f.field.structField.Index)
for _, xrv := range f.rvalues {
if f.field.Type.equal(frv, xrv) {
return
}
}
case filterCompare[T]:
v, err := p.Value(e)
if err != nil {
q.error(err)
return false, err
}
rv := reflect.ValueOf(v)
fv := rv.FieldByIndex(f.field.structField.Index)
cmp := compare(f.field.Type.Kind, fv, f.value)
switch {
case cmp == 0 && (f.op == opGreaterEqual || f.op == opLessEqual):
case cmp < 0 && (f.op == opLess || f.op == opLessEqual):
case cmp > 0 && (f.op == opGreater || f.op == opGreaterEqual):
default:
return
}
default:
q.errorf("internal error: missing case for filter %T", ff)
return false, q.err
}
}
return true, nil
}
// comparable reports whether a type can be used with filterCompare, e.g. for greater/less comparisons.
func comparable(ft fieldType) bool {
if ft.Ptr {
return false
}
switch ft.Kind {
case kindBytes, kindString, kindBool, kindInt8, kindInt16, kindInt32, kindInt64, kindInt, kindUint8, kindUint16, kindUint32, kindUint64, kindUint, kindFloat32, kindFloat64, kindTime:
return true
default:
return false
}
}
func compare(k kind, a, b reflect.Value) int {
switch k {
case kindBytes:
return bytes.Compare(a.Bytes(), b.Bytes())
case kindString:
sa := a.String()
sb := b.String()
if sa < sb {
return -1
} else if sa > sb {
return 1
}
return 0
case kindBool:
ba := a.Bool()
bb := b.Bool()
if !ba && bb {
return -1
} else if ba && !bb {
return 1
}
return 0
case kindInt8, kindInt16, kindInt32, kindInt64, kindInt:
ia := a.Int()
ib := b.Int()
if ia < ib {
return -1
} else if ia > ib {
return 1
}
return 0
case kindUint8, kindUint16, kindUint32, kindUint64, kindUint:
ia := a.Uint()
ib := b.Uint()
if ia < ib {
return -1
} else if ia > ib {
return 1
}
return 0
case kindFloat32, kindFloat64:
fa := a.Float()
fb := b.Float()
if fa < fb {
return -1
} else if fa > fb {
return 1
}
return 0
case kindTime:
ta := a.Interface().(time.Time)
tb := b.Interface().(time.Time)
if ta.Before(tb) {
return -1
} else if ta.After(tb) {
return 1
}
return 0
}
// todo: internal error, cannot happen
return 0
}
func (e *exec[T]) sort() {
// todo: We should check whether we actually need to load values. We're just
always loading them for the time being because SortStableFunc isn't going to
// give us a *pair (even though it could because of the slice) so we
// couldn't set/cache the value T during sorting.
q := e.q
for i := range e.data {
p := &e.data[i]
if p.value != nil {
continue
}
_, err := p.Value(e)
if err != nil {
q.error(err)
return
}
}
sort.SliceStable(e.data, func(i, j int) bool {
a := e.data[i]
b := e.data[j]
for _, o := range e.plan.orders {
ra := reflect.ValueOf(*a.value)
rb := reflect.ValueOf(*b.value)
rva := ra.FieldByIndex(o.field.structField.Index)
rvb := rb.FieldByIndex(o.field.structField.Index)
cmp := compare(o.field.Type.Kind, rva, rvb)
if cmp == 0 {
continue
}
return cmp < 0 && o.asc || cmp > 0 && !o.asc
}
return false
})
}

387
vendor/github.com/mjl-/bstore/export.go generated vendored Normal file
View File

@ -0,0 +1,387 @@
package bstore
import (
"fmt"
"math"
"reflect"
"strconv"
"time"
bolt "go.etcd.io/bbolt"
)
// Types returns the types present in the database, regardless of whether they
// are currently registered using Open or Register. Useful for exporting data
// with Keys and Records.
func (db *DB) Types() ([]string, error) {
var types []string
err := db.Read(func(tx *Tx) error {
return tx.btx.ForEach(func(bname []byte, b *bolt.Bucket) error {
// note: we do not track stats for types operations.
types = append(types, string(bname))
return nil
})
})
if err != nil {
return nil, err
}
return types, nil
}
// prepareType prepares typeName for export/introspection with DB.Keys,
// DB.Record, DB.Records. It is different in that it does not require a
// reflect.Type to parse into. It parses to a map, e.g. for export to JSON. The
// returned typeVersion has no structFields set in its fields.
func (db *DB) prepareType(tx *Tx, typeName string) (map[uint32]*typeVersion, *typeVersion, *bolt.Bucket, []string, error) {
rb, err := tx.recordsBucket(typeName, 0.5)
if err != nil {
return nil, nil, nil, nil, err
}
tb, err := tx.bucket(bucketKey{typeName, "types"})
if err != nil {
return nil, nil, nil, nil, err
}
versions := map[uint32]*typeVersion{}
var tv *typeVersion
err = tb.ForEach(func(bk, bv []byte) error {
// note: we do not track stats for types operations.
ntv, err := parseSchema(bk, bv)
if err != nil {
return err
}
versions[ntv.Version] = ntv
if tv == nil || ntv.Version > tv.Version {
tv = ntv
}
return nil
})
if err != nil {
return nil, nil, nil, nil, err
}
if tv == nil {
return nil, nil, nil, nil, fmt.Errorf("%w: no type versions", ErrStore)
}
fields := make([]string, len(tv.Fields))
for i, f := range tv.Fields {
fields[i] = f.Name
}
return versions, tv, rb, fields, nil
}
// Keys returns the parsed primary keys for the type "typeName". The type does
// not have to be registered with Open or Register. For use with Record(s) to
// export data.
func (db *DB) Keys(typeName string, fn func(pk any) error) error {
return db.Read(func(tx *Tx) error {
_, tv, rb, _, err := db.prepareType(tx, typeName)
if err != nil {
return err
}
// todo: do not pass nil parser?
v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(nil))).Elem()
return rb.ForEach(func(bk, bv []byte) error {
tx.stats.Records.Cursor++
if err := parsePK(v, bk); err != nil {
return err
}
return fn(v.Interface())
})
})
}
// Record returns the record with primary "key" for "typeName" parsed as map.
// "Fields" is set to the fields of the type. The type does not have to be
// registered with Open or Register. Record parses the data without the Go
// type present. BinaryMarshal fields are returned as bytes.
func (db *DB) Record(typeName, key string, fields *[]string) (map[string]any, error) {
var r map[string]any
err := db.Read(func(tx *Tx) error {
versions, tv, rb, xfields, err := db.prepareType(tx, typeName)
if err != nil {
return err
}
*fields = xfields
var kv any
switch tv.Fields[0].Type.Kind {
case kindBool:
switch key {
case "true":
kv = true
case "false":
kv = false
default:
err = fmt.Errorf("%w: invalid bool %q", ErrParam, key)
}
case kindInt8:
kv, err = strconv.ParseInt(key, 10, 8)
case kindInt16:
kv, err = strconv.ParseInt(key, 10, 16)
case kindInt32:
kv, err = strconv.ParseInt(key, 10, 32)
case kindInt:
kv, err = strconv.ParseInt(key, 10, 32)
case kindInt64:
kv, err = strconv.ParseInt(key, 10, 64)
case kindUint8:
kv, err = strconv.ParseUint(key, 10, 8)
case kindUint16:
kv, err = strconv.ParseUint(key, 10, 16)
case kindUint32:
kv, err = strconv.ParseUint(key, 10, 32)
case kindUint:
kv, err = strconv.ParseUint(key, 10, 32)
case kindUint64:
kv, err = strconv.ParseUint(key, 10, 64)
case kindString:
kv = key
case kindBytes:
kv = []byte(key) // todo: or decode from base64?
default:
return fmt.Errorf("internal error: unknown primary key kind %v", tv.Fields[0].Type.Kind)
}
if err != nil {
return err
}
pkv := reflect.ValueOf(kv)
kind, err := typeKind(pkv.Type())
if err != nil {
return err
}
if kind != tv.Fields[0].Type.Kind {
// Convert from various int types above to required type. The ParseInt/ParseUint
// calls already validated that the values fit.
pkt := reflect.TypeOf(tv.Fields[0].Type.zero(nil))
pkv = pkv.Convert(pkt)
}
k, err := packPK(pkv)
if err != nil {
return err
}
tx.stats.Records.Get++
bv := rb.Get(k)
if bv == nil {
return ErrAbsent
}
record, err := parseMap(versions, k, bv)
if err != nil {
return err
}
r = record
return nil
})
return r, err
}
// Records calls "fn" for each record of "typeName". Records sets "fields" to
// the fields of the type. The type does not have to be registered with Open or
// Register. Record parses the data without the Go type present. BinaryMarshal
// fields are returned as bytes.
func (db *DB) Records(typeName string, fields *[]string, fn func(map[string]any) error) error {
return db.Read(func(tx *Tx) error {
versions, _, rb, xfields, err := db.prepareType(tx, typeName)
if err != nil {
return err
}
*fields = xfields
return rb.ForEach(func(bk, bv []byte) error {
tx.stats.Records.Cursor++
record, err := parseMap(versions, bk, bv)
if err != nil {
return err
}
return fn(record)
})
})
}
// parseMap parses a record into a map with the right typeVersion from versions.
func parseMap(versions map[uint32]*typeVersion, bk, bv []byte) (record map[string]any, rerr error) {
p := &parser{buf: bv, orig: bv}
var version uint32
defer func() {
x := recover()
if x == nil {
return
}
if err, ok := x.(parseErr); ok {
rerr = fmt.Errorf("%w (version %d, buf %x orig %x)", err.err, version, p.buf, p.orig)
return
}
panic(x)
}()
version = uint32(p.Uvarint())
tv := versions[version]
if tv == nil {
return nil, fmt.Errorf("%w: unknown type version %d", ErrStore, version)
}
r := map[string]any{}
v := reflect.New(reflect.TypeOf(tv.Fields[0].Type.zero(p))).Elem()
err := parsePK(v, bk)
if err != nil {
return nil, err
}
r[tv.Fields[0].Name] = v.Interface()
// todo: Should we be looking at the most recent tv, and hiding fields
// that have been removed in a later typeVersion? Like we do for real
// parsing into reflect value?
fm := p.Fieldmap(len(tv.Fields) - 1)
for i, f := range tv.Fields[1:] {
if fm.Nonzero(i) {
r[f.Name] = f.Type.parseValue(p)
} else {
r[f.Name] = f.Type.zero(p)
}
}
if len(p.buf) != 0 {
return nil, fmt.Errorf("%w: leftover data after parsing", ErrStore)
}
return r, nil
}
func (ft fieldType) parseValue(p *parser) any {
switch ft.Kind {
case kindBytes:
return p.TakeBytes(false)
case kindBinaryMarshal:
// We don't have the type available, so we just return the binary data.
return p.TakeBytes(false)
case kindBool:
return true
case kindInt8:
return int8(p.Varint())
case kindInt16:
return int16(p.Varint())
case kindInt32:
return int32(p.Varint())
case kindInt:
i := p.Varint()
if i < math.MinInt32 || i > math.MaxInt32 {
p.Errorf("%w: int %d does not fit in int32", ErrStore, i)
}
return int(i)
case kindInt64:
return p.Varint()
case kindUint8:
return uint8(p.Uvarint())
case kindUint16:
return uint16(p.Uvarint())
case kindUint32:
return uint32(p.Uvarint())
case kindUint:
i := p.Uvarint()
if i > math.MaxUint32 {
p.Errorf("%w: uint %d does not fit in uint32", ErrStore, i)
}
return uint(i)
case kindUint64:
return p.Uvarint()
case kindFloat32:
return math.Float32frombits(uint32(p.Uvarint()))
case kindFloat64:
return math.Float64frombits(p.Uvarint())
case kindString:
return string(p.TakeBytes(false))
case kindTime:
var t time.Time
err := t.UnmarshalBinary(p.TakeBytes(false))
if err != nil {
p.Errorf("%w: parsing time: %v", ErrStore, err)
}
return t
case kindSlice:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
var l []any
for i := 0; i < n; i++ {
if fm.Nonzero(i) {
l = append(l, ft.List.parseValue(p))
} else {
// Always add elements, zero or not, or we would
// change the number of elements in the list.
l = append(l, ft.List.zero(p))
}
}
return l
case kindMap:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
m := map[string]any{}
for i := 0; i < n; i++ {
// Converting to string can be ugly, but it's the best we can do.
k := fmt.Sprintf("%v", ft.MapKey.parseValue(p))
if _, ok := m[k]; ok {
return fmt.Errorf("%w: duplicate key %q in map", ErrStore, k)
}
var v any
if fm.Nonzero(i) {
v = ft.MapValue.parseValue(p)
} else {
v = ft.MapValue.zero(p)
}
m[k] = v
}
return m
case kindStruct:
fm := p.Fieldmap(len(ft.Fields))
m := map[string]any{}
for i, f := range ft.Fields {
if fm.Nonzero(i) {
m[f.Name] = f.Type.parseValue(p)
} else {
m[f.Name] = f.Type.zero(p)
}
}
return m
}
p.Errorf("internal error: unhandled field type %v", ft.Kind)
panic("cannot happen")
}
var zerovalues = map[kind]any{
kindBytes: []byte(nil),
kindBinaryMarshal: []byte(nil), // We don't have the actual type available, so we just return binary data.
kindBool: false,
kindInt8: int8(0),
kindInt16: int16(0),
kindInt32: int32(0),
kindInt: int(0),
kindInt64: int64(0),
kindUint8: uint8(0),
kindUint16: uint16(0),
kindUint32: uint32(0),
kindUint: uint(0),
kindUint64: uint64(0),
kindFloat32: float32(0),
kindFloat64: float64(0),
kindString: "",
kindTime: zerotime,
kindSlice: []any(nil),
kindMap: map[string]any(nil),
kindStruct: map[string]any(nil),
}
func (ft fieldType) zero(p *parser) any {
v, ok := zerovalues[ft.Kind]
if !ok {
p.Errorf("internal error: unhandled zero value for field type %v", ft.Kind)
}
return v
}
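A sketch of exporting with the API above; the database path is illustrative, and no Go types need to be registered for Types/Keys/Record/Records:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/mjl-/bstore"
)

func main() {
	db, err := bstore.Open("app.db", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	types, err := db.Types()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range types {
		var fields []string
		err := db.Records(name, &fields, func(record map[string]any) error {
			buf, err := json.Marshal(record)
			if err != nil {
				return err
			}
			fmt.Printf("%s: %s\n", name, buf)
			return nil
		})
		if err != nil {
			log.Fatal(err)
		}
	}
}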

78
vendor/github.com/mjl-/bstore/format.md generated vendored Normal file
View File

@ -0,0 +1,78 @@
# Types
Each Go type is stored in its own bucket, after its name. Only subbuckets are
created directly below a type bucket, no key/values. Two subbuckets are always
created: "records" for the data, "types" for the type definitions. Each index
is stored in a subbucket named "index." followed by the name. Unique and
non-unique indices use the same encoding.
# Type versions
Type definitions are stored in the "types" subbucket. The key is a 4 byte
uint32, a version as referenced from a data record. The value is a JSON-encoded
representation of the typeVersion struct.
When a new Go type or changed Go type is registered with a database, a new type
version is added to the "types" subbucket. Data is always inserted/updated with
the most recent type version. But the database may still hold data records
referencing older type versions. Bstore decodes a packed data record with the
referenced type version. For storage efficiency: the type version is reused for
many stored records, a self-describing format (like JSON) would duplicate the
field names in each stored record.
# Record storage
Primary keys of types are used as BoltDB keys and can be of bool, integer
types, strings or byte slices. Floats, time, struct, slice, map, binarymarshal
cannot be stored as primary key. Bools are stored as a single byte 0 or 1.
Integers are stored in their fixed-width encoding (e.g. 4 bytes for a 32 bit int).
Signed integers are stored so the fixed-width byte value is ordered for all
signed values, i.e. math.MinInt32 is stored as 4 bytes bigendian with value 0.
For strings and byte slices, only their bytes are stored.
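A small sketch of that ordering property, not bstore's actual code: flipping the sign bit of the two's-complement value yields big-endian bytes that sort like the signed integer, mapping math.MinInt32 to all-zero bytes.

package main

import (
	"encoding/binary"
	"fmt"
)

// packInt32Key packs v so that byte-wise comparison matches signed order.
func packInt32Key(v int32) [4]byte {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], uint32(v)^0x80000000)
	return b
}

func main() {
	// Prints 00000000 7fffffff 80000001: MinInt32 < -1 < 1 holds byte-wise.
	fmt.Printf("%x %x %x\n", packInt32Key(-2147483648), packInt32Key(-1), packInt32Key(1))
}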
The value stored with a BoltDB key starts with a uvarint "version" of the type.
This refers to a version in the "types" bucket. The primary key is not encoded
again in the data record itself. The remaining fields are space-efficiently
encoded.
After the uvarint version follow as many bytes as needed to fit a bitmap for the direct
struct fields in the type description. Each bit indicates if the value is
nonzero and present in the value that follows. Only non-zero values take up
more space than the single bit and are stored consecutively after the fieldmap:
- Pointers are stored as their non-pointer value. If the pointer is nil, it
is zero in the fieldmap.
- If the underlying type is a signed int or float, or an unsigned int, then
varint/uvarint encoding from encoding/binary is used.
- If the underlying type is a string or []byte, uvarint count followed by the
bytes.
- If the underlying type is a bool, the value is always true and no
additional data is present to represent the value. False is represented by
the zero value marked in the fieldmap.
- Slices use a uvarint for the number of elements, followed by a bitmap for
nonzero values, followed by the encoded nonzero elements.
- Maps use a uvarint for the number of key/value pairs, followed by a
fieldmap for the values (the keys are always present), followed by each
pair: key (always present), value (only if nonzero); key, value; etc.
- If a type is an encoding.BinaryUnmarshaler and encoding.BinaryMarshaler,
then its bytes are stored prefixed with its uvarint length.
- If the type is a struct, its fields are encoded with a fieldmap followed
by its nonzero field values.
- Other types cannot be represented currently.
In a new type version, the type of a field can be changed as long as existing
records can be decoded into the new Go type. E.g. you can change an int32 into
an int64. You can only change an int64 into an int32 if all values you attempt
to read are small enough to fit in an int32. You cannot change between signed
and unsigned integers, or between string and []byte.
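For example, widening an integer field is a compatible change (a sketch; the
Msg type is hypothetical), since every stored varint-encoded int32 value also
decodes into an int64:

	// Type version 1.
	type Msg struct {
		ID   int64
		Size int32
	}

	// Type version 2: records stored under version 1 still decode.
	type Msg struct {
		ID   int64
		Size int64
	}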
# Index storage
Indexes are stored in subbuckets, named starting with "index." followed by the
index name. Keys are self-delimiting encodings of the fields that make up the
key, followed by the primary key for the "records" bucket. Values are always
empty in index buckets. For bool and integer types, the same fixed width
encoding as for primary keys in the "records" subbucket is used. Strings are
encoded by their bytes (no \0 allowed) followed by a delimiting \0. Unlike
primary keys, an index can cover a field with type time.Time. Times are encoded
as 8 bytes of seconds followed by 4 bytes of nanoseconds.
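For example (a sketch), a record with string field Name "bob" and uint32
primary key 7, with a non-unique index on Name, gets this index key:

	62 6f 62 00  00 00 00 07
	"bob" + \0   primary key, 4 bytes big-endian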

13
vendor/github.com/mjl-/bstore/gendoc.sh generated vendored Normal file
View File

@ -0,0 +1,13 @@
#!/bin/sh
(
cat <<EOF
/*
Command bstore provides commands for inspecting a bstore database.
Subcommands:
EOF
go run cmd/bstore/bstore.go 2>&1 | sed 's/^/ /' | grep -v 'exit status'
echo '*/'
echo 'package main'
) >cmd/bstore/doc.go

282
vendor/github.com/mjl-/bstore/keys.go generated vendored Normal file
View File

@ -0,0 +1,282 @@
package bstore
import (
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
/*
The records buckets map a primary key to the record data. The primary key is of
a form that we can scan/range over. So fixed width for integers. For strings and
bytes they are just their byte representation. We do not store the PK in the
record data. This means we cannot store a time.Time as primary key, because we
cannot have the timezone encoded for comparison reasons.
Index keys are similar to PK's. Unique and non-unique indices are encoded the
same. The stored values are always empty, the key consists of the field values
the index was created for, followed by the PK. The encoding of a field is nearly
the same as the encoding of that type as a primary key. The differences: strings
end with a \0 to make them self-delimiting; byte slices are not allowed because
they are not self-delimiting; time.Time is allowed because the time is available
in full (with timezone) in the record data.
*/
// packPK returns the PK bytes representation for the PK value rv.
func packPK(rv reflect.Value) ([]byte, error) {
kv := rv.Interface()
var buf []byte
switch k := kv.(type) {
case string:
buf = []byte(k)
case []byte:
buf = k
case bool:
var b byte
if k {
b = 1
}
buf = []byte{b}
case int8:
buf = []byte{byte(uint8(k + math.MinInt8))}
case int16:
buf = binary.BigEndian.AppendUint16(nil, uint16(k+math.MinInt16))
case int32:
buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
case int:
if k < math.MinInt32 || k > math.MaxInt32 {
return nil, fmt.Errorf("%w: int %d does not fit in int32", ErrParam, k)
}
buf = binary.BigEndian.AppendUint32(nil, uint32(k+math.MinInt32))
case int64:
buf = binary.BigEndian.AppendUint64(nil, uint64(k+math.MinInt64))
case uint8:
buf = []byte{k}
case uint16:
buf = binary.BigEndian.AppendUint16(nil, k)
case uint32:
buf = binary.BigEndian.AppendUint32(nil, k)
case uint:
if k > math.MaxUint32 {
return nil, fmt.Errorf("%w: uint %d does not fit in uint32", ErrParam, k)
}
buf = binary.BigEndian.AppendUint32(nil, uint32(k))
case uint64:
buf = binary.BigEndian.AppendUint64(nil, k)
default:
return nil, fmt.Errorf("%w: unsupported primary key type %T", ErrType, kv)
}
return buf, nil
}
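// packPKOrderSketch is an illustrative sketch, not part of the original code:
// the MinIntN offset above makes the big-endian bytes of signed keys sort in
// numeric order, relying on Go's defined wrap-around for signed integers.
func packPKOrderSketch() bool {
	lo, hi := int32(-1), int32(1)
	// uint32(lo+math.MinInt32) is 0x7fffffff, uint32(hi+math.MinInt32) is 0x80000001.
	return uint32(lo+math.MinInt32) < uint32(hi+math.MinInt32) // true
}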
// parsePK parses primary key bk into rv.
func parsePK(rv reflect.Value, bk []byte) error {
k, err := typeKind(rv.Type())
if err != nil {
return err
}
switch k {
case kindBytes:
buf := make([]byte, len(bk))
copy(buf, bk)
rv.SetBytes(buf)
return nil
case kindString:
rv.SetString(string(bk))
return nil
}
var need int
switch k {
case kindBool, kindInt8, kindUint8:
need = 1
case kindInt16, kindUint16:
need = 2
case kindInt32, kindUint32, kindInt, kindUint:
need = 4
case kindInt64, kindUint64:
need = 8
}
if len(bk) != need {
return fmt.Errorf("%w: got %d bytes for PK, need %d", ErrStore, len(bk), need)
}
switch k {
case kindBool:
rv.SetBool(bk[0] != 0)
case kindInt8:
rv.SetInt(int64(int8(bk[0]) - math.MinInt8))
case kindInt16:
rv.SetInt(int64(int16(binary.BigEndian.Uint16(bk)) - math.MinInt16))
case kindInt32, kindInt:
rv.SetInt(int64(int32(binary.BigEndian.Uint32(bk)) - math.MinInt32))
case kindInt64:
rv.SetInt(int64(int64(binary.BigEndian.Uint64(bk)) - math.MinInt64))
case kindUint8:
rv.SetUint(uint64(bk[0]))
case kindUint16:
rv.SetUint(uint64(binary.BigEndian.Uint16(bk)))
case kindUint32, kindUint:
rv.SetUint(uint64(binary.BigEndian.Uint32(bk)))
case kindUint64:
rv.SetUint(uint64(binary.BigEndian.Uint64(bk)))
default:
// note: we cannot have kindTime as primary key at the moment.
return fmt.Errorf("%w: unsupported primary key type %v", ErrType, rv.Type())
}
return nil
}
// parseKey parses the PK (last element) of an index key.
// If all is set, also gathers the values before and returns them in the second
// parameter.
func (idx *index) parseKey(buf []byte, all bool) ([]byte, [][]byte, error) {
var err error
var keys [][]byte
take := func(n int) {
if len(buf) < n {
err = fmt.Errorf("%w: not enough bytes in index key", ErrStore)
return
}
if all {
keys = append(keys, buf[:n])
}
buf = buf[n:]
}
fields:
for _, f := range idx.Fields {
if err != nil {
break
}
switch f.Type.Kind {
case kindString:
for i, b := range buf {
if b == 0 {
if all {
keys = append(keys, buf[:i])
}
buf = buf[i+1:]
continue fields
}
}
err = fmt.Errorf("%w: bad string without 0 in index key", ErrStore)
case kindBool:
take(1)
case kindInt8, kindUint8:
take(1)
case kindInt16, kindUint16:
take(2)
case kindInt32, kindUint32, kindInt, kindUint:
take(4)
case kindInt64, kindUint64:
take(8)
case kindTime:
take(8 + 4)
}
}
if err != nil {
return nil, nil, err
}
pk := buf
switch idx.tv.Fields[0].Type.Kind {
case kindBool:
take(1)
case kindInt8, kindUint8:
take(1)
case kindInt16, kindUint16:
take(2)
case kindInt32, kindInt, kindUint32, kindUint:
take(4)
case kindInt64, kindUint64:
take(8)
}
if len(pk) != len(buf) && len(buf) != 0 {
return nil, nil, fmt.Errorf("%w: leftover bytes in index key (%x)", ErrStore, buf)
}
if all {
return pk, keys[:len(keys)-1], nil
}
return pk, nil, nil
}
// packKey returns a key to store in an index: first the prefix without pk, then
// the prefix including pk.
func (idx *index) packKey(rv reflect.Value, pk []byte) ([]byte, []byte, error) {
var l []reflect.Value
for _, f := range idx.Fields {
frv := rv.FieldByIndex(f.structField.Index)
l = append(l, frv)
}
return packIndexKeys(l, pk)
}
// packIndexKeys packs values from l, followed by the pk.
// It returns the key prefix (without pk), and full key with pk.
func packIndexKeys(l []reflect.Value, pk []byte) ([]byte, []byte, error) {
var prek, ik []byte
for _, frv := range l {
k, err := typeKind(frv.Type())
if err != nil {
return nil, nil, err
}
var buf []byte
switch k {
case kindBool:
buf = []byte{0}
if frv.Bool() {
buf[0] = 1
}
case kindInt8:
buf = []byte{byte(int8(frv.Int()) + math.MinInt8)}
case kindInt16:
buf = binary.BigEndian.AppendUint16(nil, uint16(int16(frv.Int())+math.MinInt16))
case kindInt32:
buf = binary.BigEndian.AppendUint32(nil, uint32(int32(frv.Int())+math.MinInt32))
case kindInt:
i := frv.Int()
if i < math.MinInt32 || i > math.MaxInt32 {
return nil, nil, fmt.Errorf("%w: int value %d does not fit in int32", ErrParam, i)
}
buf = binary.BigEndian.AppendUint32(nil, uint32(int32(i)+math.MinInt32))
case kindInt64:
buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Int()+math.MinInt64))
case kindUint8:
buf = []byte{byte(frv.Uint())}
case kindUint16:
buf = binary.BigEndian.AppendUint16(nil, uint16(frv.Uint()))
case kindUint32:
buf = binary.BigEndian.AppendUint32(nil, uint32(frv.Uint()))
case kindUint:
i := frv.Uint()
if i > math.MaxUint32 {
return nil, nil, fmt.Errorf("%w: uint value %d does not fit in uint32", ErrParam, i)
}
buf = binary.BigEndian.AppendUint32(nil, uint32(i))
case kindUint64:
buf = binary.BigEndian.AppendUint64(nil, uint64(frv.Uint()))
case kindString:
buf = []byte(frv.String())
for _, c := range buf {
if c == 0 {
return nil, nil, fmt.Errorf("%w: string used as index key cannot have \\0", ErrParam)
}
}
buf = append(buf, 0)
case kindTime:
tm := frv.Interface().(time.Time)
buf = binary.BigEndian.AppendUint64(nil, uint64(tm.Unix()+math.MinInt64))
buf = binary.BigEndian.AppendUint32(buf, uint32(tm.Nanosecond()))
default:
return nil, nil, fmt.Errorf("internal error: bad type %v for index", frv.Type()) // todo: should be caught when making index type
}
ik = append(ik, buf...)
}
n := len(ik)
ik = append(ik, pk...)
prek = ik[:n]
return prek, ik, nil
}

218
vendor/github.com/mjl-/bstore/nonzero.go generated vendored Normal file
View File

@ -0,0 +1,218 @@
package bstore
import (
"fmt"
"reflect"
)
// isZero returns whether v is the zero value for the fields that we store.
// reflect.IsZero cannot be used on structs because it checks private fields as well.
func (ft fieldType) isZero(v reflect.Value) bool {
if !v.IsValid() {
return true
}
if ft.Ptr {
return v.IsNil()
}
switch ft.Kind {
case kindStruct:
for _, f := range ft.Fields {
if !f.Type.isZero(v.FieldByIndex(f.structField.Index)) {
return false
}
}
return true
}
// Use standard IsZero otherwise, also for kindBinaryMarshal.
return v.IsZero()
}
// checkNonzero compares ofields and nfields (from the previous type schema vs
// the newly created type schema) for the nonzero struct tag. If an existing
// field got a nonzero struct tag added, we verify that there are indeed no
// zero values in the database. If there are, we return ErrZero.
func (tx *Tx) checkNonzero(st storeType, tv *typeVersion, ofields, nfields []field) error {
// First we gather paths that we need to check, so we can later simply
// execute those steps on all data we need to read.
paths := &follows{}
next:
for _, f := range nfields {
for _, of := range ofields {
if f.Name == of.Name {
err := f.checkNonzeroGather(&of, paths)
if err != nil {
return err
}
continue next
}
}
if err := f.checkNonzeroGather(nil, paths); err != nil {
return err
}
}
if len(paths.paths) == 0 {
// Common case, not reading all data.
return nil
}
// Finally actually do the checks.
	// todo: if there are only top-level fields to check, and we have an index, we can use the index to check this without reading all data.
return tx.checkNonzeroPaths(st, tv, paths.paths)
}
type follow struct {
mapKey, mapValue bool
field field
}
type follows struct {
current []follow
paths [][]follow
}
func (f *follows) push(ff follow) {
f.current = append(f.current, ff)
}
func (f *follows) pop() {
f.current = f.current[:len(f.current)-1]
}
func (f *follows) add() {
f.paths = append(f.paths, append([]follow{}, f.current...))
}
func (f field) checkNonzeroGather(of *field, paths *follows) error {
paths.push(follow{field: f})
defer paths.pop()
if f.Nonzero && (of == nil || !of.Nonzero) {
paths.add()
}
if of != nil {
return f.Type.checkNonzeroGather(of.Type, paths)
}
return nil
}
func (ft fieldType) checkNonzeroGather(oft fieldType, paths *follows) error {
switch ft.Kind {
case kindMap:
paths.push(follow{mapKey: true})
if err := ft.MapKey.checkNonzeroGather(*oft.MapKey, paths); err != nil {
return err
}
paths.pop()
paths.push(follow{mapValue: true})
if err := ft.MapValue.checkNonzeroGather(*oft.MapValue, paths); err != nil {
return err
}
paths.pop()
case kindSlice:
err := ft.List.checkNonzeroGather(*oft.List, paths)
if err != nil {
return err
}
case kindStruct:
next:
for _, ff := range ft.Fields {
for _, off := range oft.Fields {
if ff.Name == off.Name {
err := ff.checkNonzeroGather(&off, paths)
if err != nil {
return err
}
continue next
}
}
err := ff.checkNonzeroGather(nil, paths)
if err != nil {
return err
}
}
}
return nil
}
// checkNonzeroPaths reads through all records of a type, and checks that the fields
// indicated by paths are nonzero. If not, ErrZero is returned.
func (tx *Tx) checkNonzeroPaths(st storeType, tv *typeVersion, paths [][]follow) error {
rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
if err != nil {
return err
}
return rb.ForEach(func(bk, bv []byte) error {
tx.stats.Records.Cursor++
rv, err := st.parseNew(bk, bv)
if err != nil {
return err
}
// todo optimization: instead of parsing the full record, use the fieldmap to see if the value is nonzero.
for _, path := range paths {
frv := rv.FieldByIndex(path[0].field.structField.Index)
if err := path[0].field.checkNonzero(frv, path[1:]); err != nil {
return err
}
}
return nil
})
}
func (f field) checkNonzero(rv reflect.Value, path []follow) error {
if len(path) == 0 {
if !f.Nonzero {
return fmt.Errorf("internal error: checkNonzero: expected field to have Nonzero set")
}
if f.Type.isZero(rv) {
return fmt.Errorf("%w: field %q", ErrZero, f.Name)
}
return nil
}
return f.Type.checkNonzero(rv, path)
}
func (ft fieldType) checkNonzero(rv reflect.Value, path []follow) error {
switch ft.Kind {
case kindMap:
follow := path[0]
path = path[1:]
key := follow.mapKey
if !key && !follow.mapValue {
return fmt.Errorf("internal error: following map, expected mapKey or mapValue, got %#v", follow)
}
iter := rv.MapRange()
for iter.Next() {
var err error
if key {
err = ft.MapKey.checkNonzero(iter.Key(), path)
} else {
err = ft.MapValue.checkNonzero(iter.Value(), path)
}
if err != nil {
return err
}
}
case kindSlice:
n := rv.Len()
for i := 0; i < n; i++ {
if err := ft.List.checkNonzero(rv.Index(i), path); err != nil {
return err
}
}
case kindStruct:
follow := path[0]
path = path[1:]
frv := rv.FieldByIndex(follow.field.structField.Index)
if err := follow.field.checkNonzero(frv, path); err != nil {
return err
}
default:
return fmt.Errorf("internal error: checkNonzero with non-empty path, but kind %v", ft.Kind)
}
return nil
}

276
vendor/github.com/mjl-/bstore/pack.go generated vendored Normal file
View File

@ -0,0 +1,276 @@
package bstore
import (
"bytes"
"encoding"
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
// fieldmap represents a bitmap indicating which fields are actually stored and
// can be parsed. zero values for fields are not otherwise stored.
type fieldmap struct {
max int // Required number of fields.
buf []byte // Bitmap, we write the next 0/1 at bit n.
n int // Fields seen so far.
offset int // In final output, we write buf back after finish. Only relevant for packing.
Errorf func(format string, args ...any)
}
// add bit to fieldmap indicating if the field is nonzero.
func (f *fieldmap) Field(nonzero bool) {
o := f.n / 8
if f.n >= f.max {
f.Errorf("internal error: too many fields, max %d", f.max)
}
if nonzero {
f.buf[o] |= 1 << (7 - f.n%8)
}
f.n++
}
// check if field i is nonzero.
func (f *fieldmap) Nonzero(i int) bool {
v := f.buf[i/8]&(1<<(7-i%8)) != 0
return v
}
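// fieldmapBitsSketch is an illustrative sketch, not part of the original code:
// field i lives in byte i/8 at bit 7-i%8, most significant bit first. With 10
// fields of which only 0 and 9 are nonzero, the bitmap bytes are 0x80, 0x40.
func fieldmapBitsSketch() []byte {
	buf := make([]byte, (10+7)/8) // Two bytes for 10 fields.
	for _, i := range []int{0, 9} {
		buf[i/8] |= 1 << (7 - i%8)
	}
	return buf // []byte{0x80, 0x40}
}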
type packer struct {
b *bytes.Buffer
offset int
fieldmaps []*fieldmap // Pending fieldmaps, not excluding fieldmap below.
fieldmap *fieldmap // Currently active.
popped []*fieldmap // Completed fieldmaps, to be written back during finish.
}
func (p *packer) Errorf(format string, args ...any) {
panic(packErr{fmt.Errorf(format, args...)})
}
// Push a new fieldmap on the stack for n fields.
func (p *packer) PushFieldmap(n int) {
p.fieldmaps = append(p.fieldmaps, p.fieldmap)
buf := make([]byte, (n+7)/8)
p.fieldmap = &fieldmap{max: n, buf: buf, offset: p.offset, Errorf: p.Errorf}
p.Write(buf) // Updates offset. Write errors cause panic.
}
// Pop a fieldmap from the stack. It is remembered in popped for writing the
// bytes during finish.
func (p *packer) PopFieldmap() {
if p.fieldmap.n != p.fieldmap.max {
p.Errorf("internal error: fieldmap n %d != max %d", p.fieldmap.n, p.fieldmap.max)
}
p.popped = append(p.popped, p.fieldmap)
p.fieldmap = p.fieldmaps[len(p.fieldmaps)-1]
p.fieldmaps = p.fieldmaps[:len(p.fieldmaps)-1]
}
// Finish writes back finished (popped) fieldmaps to the correct offset,
// returning the final bytes representation of this record.
func (p *packer) Finish() []byte {
if p.fieldmap != nil {
p.Errorf("internal error: leftover fieldmap during finish")
}
buf := p.b.Bytes()
for _, f := range p.popped {
copy(buf[f.offset:], f.buf)
}
return buf
}
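// packerRoundTripSketch is an illustrative sketch, not part of the original
// code: pack a single nonzero field and observe the bitmap byte written back
// at its reserved offset by Finish.
func packerRoundTripSketch() []byte {
	p := &packer{b: &bytes.Buffer{}}
	p.PushFieldmap(1) // Reserves one bitmap byte at offset 0.
	p.Field(true)     // Field 0 is nonzero: bit 7 of the bitmap.
	p.Uvarint(7)      // The field's value.
	p.PopFieldmap()
	return p.Finish() // []byte{0x80, 0x07}
}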
// Field adds field with nonzeroness to the current fieldmap.
func (p *packer) Field(nonzero bool) {
p.fieldmap.Field(nonzero)
}
func (p *packer) Write(buf []byte) (int, error) {
n, err := p.b.Write(buf)
if err != nil {
p.Errorf("write: %w", err)
}
if n > 0 {
p.offset += n
}
return n, err
}
func (p *packer) AddBytes(buf []byte) {
p.Uvarint(uint64(len(buf)))
p.Write(buf) // Write errors cause panic.
}
func (p *packer) Uvarint(v uint64) {
buf := make([]byte, binary.MaxVarintLen64)
o := binary.PutUvarint(buf, v)
p.Write(buf[:o]) // Write errors cause panic.
}
func (p *packer) Varint(v int64) {
buf := make([]byte, binary.MaxVarintLen64)
o := binary.PutVarint(buf, v)
p.Write(buf[:o]) // Write errors cause panic.
}
type packErr struct {
err error
}
// pack rv (reflect.Struct), excluding the primary key field.
func (st storeType) pack(rv reflect.Value) (rbuf []byte, rerr error) {
p := &packer{b: &bytes.Buffer{}}
defer func() {
x := recover()
if x == nil {
return
}
perr, ok := x.(packErr)
if ok {
rerr = perr.err
return
}
panic(x)
}()
st.Current.pack(p, rv)
return p.Finish(), nil
}
func (tv typeVersion) pack(p *packer, rv reflect.Value) {
// When parsing, the same typeVersion (type schema) is used to
// interpret the bytes correctly.
p.Uvarint(uint64(tv.Version))
p.PushFieldmap(len(tv.Fields) - 1)
for _, f := range tv.Fields[1:] {
nrv := rv.FieldByIndex(f.structField.Index)
if f.Type.isZero(nrv) {
if f.Nonzero {
p.Errorf("%w: %q", ErrZero, f.Name)
}
p.Field(false)
// Pretend to pack to get the nonzero checks.
if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
}
} else {
p.Field(true)
f.Type.pack(p, nrv)
}
}
p.PopFieldmap()
}
// pack the nonzero value rv.
func (ft fieldType) pack(p *packer, rv reflect.Value) {
if ft.Ptr {
rv = rv.Elem()
}
switch ft.Kind {
case kindBytes:
p.AddBytes(rv.Bytes())
case kindBinaryMarshal:
v := rv
buf, err := v.Interface().(encoding.BinaryMarshaler).MarshalBinary()
if err != nil {
p.Errorf("marshalbinary: %w", err)
}
p.AddBytes(buf)
case kindBool:
// No value needed. If false, it would be zero, handled above,
// with a 0 in the fieldmap.
case kindInt:
v := rv.Int()
if v < math.MinInt32 || v > math.MaxInt32 {
p.Errorf("%w: int %d does not fit in int32", ErrParam, v)
}
p.Varint(v)
case kindInt8, kindInt16, kindInt32, kindInt64:
p.Varint(rv.Int())
case kindUint8, kindUint16, kindUint32, kindUint64:
p.Uvarint(rv.Uint())
case kindUint:
v := rv.Uint()
if v > math.MaxUint32 {
p.Errorf("%w: uint %d does not fit in uint32", ErrParam, v)
}
p.Uvarint(v)
case kindFloat32:
p.Uvarint(uint64(math.Float32bits(rv.Interface().(float32))))
case kindFloat64:
p.Uvarint(uint64(math.Float64bits(rv.Interface().(float64))))
case kindString:
p.AddBytes([]byte(rv.String()))
case kindTime:
buf, err := rv.Interface().(time.Time).MarshalBinary()
if err != nil {
p.Errorf("%w: pack time: %s", ErrParam, err)
}
p.AddBytes(buf)
case kindSlice:
n := rv.Len()
p.Uvarint(uint64(n))
p.PushFieldmap(n)
for i := 0; i < n; i++ {
nrv := rv.Index(i)
if ft.List.isZero(nrv) {
p.Field(false)
// Pretend to pack to get the nonzero checks of the element.
if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
ft.List.pack(&packer{b: &bytes.Buffer{}}, nrv)
}
} else {
p.Field(true)
ft.List.pack(p, nrv)
}
}
p.PopFieldmap()
case kindMap:
// We write a fieldmap for zeroness of the values. The keys are unique, so there
// can only be max 1 zero key. But there can be many zero values. struct{} is
// common in Go, good to support that efficiently.
n := rv.Len()
p.Uvarint(uint64(n))
p.PushFieldmap(n)
iter := rv.MapRange()
for iter.Next() {
ft.MapKey.pack(p, iter.Key())
v := iter.Value()
if ft.MapValue.isZero(v) {
p.Field(false)
				// Pretend to pack to get the nonzero checks of the value type.
if v.IsValid() && (v.Kind() != reflect.Ptr || !v.IsNil()) {
ft.MapValue.pack(&packer{b: &bytes.Buffer{}}, v)
}
} else {
p.Field(true)
ft.MapValue.pack(p, v)
}
}
p.PopFieldmap()
case kindStruct:
p.PushFieldmap(len(ft.Fields))
for _, f := range ft.Fields {
nrv := rv.FieldByIndex(f.structField.Index)
if f.Type.isZero(nrv) {
if f.Nonzero {
p.Errorf("%w: %q", ErrZero, f.Name)
}
p.Field(false)
// Pretend to pack to get the nonzero checks.
if nrv.IsValid() && (nrv.Kind() != reflect.Ptr || !nrv.IsNil()) {
f.Type.pack(&packer{b: &bytes.Buffer{}}, nrv)
}
} else {
p.Field(true)
f.Type.pack(p, nrv)
}
}
p.PopFieldmap()
default:
p.Errorf("internal error: unhandled field type") // should be prevented when registering type
}
}

321
vendor/github.com/mjl-/bstore/parse.go generated vendored Normal file
View File

@ -0,0 +1,321 @@
package bstore
import (
"encoding"
"encoding/binary"
"fmt"
"math"
"reflect"
"time"
)
type parser struct {
buf []byte
orig []byte
}
func (p *parser) Errorf(format string, args ...any) {
panic(parseErr{fmt.Errorf(format, args...)})
}
func (p *parser) checkInt(un uint64) int {
if un > math.MaxInt32 {
p.Errorf("%w: uvarint %d does not fit in int32", ErrStore, un)
}
return int(un)
}
// Fieldmap starts a new fieldmap for n fields.
func (p *parser) Fieldmap(n int) *fieldmap {
// log.Printf("parse fieldmap %d bits", n)
nb := (n + 7) / 8
buf := p.Take(nb)
return &fieldmap{n, buf, 0, 0, p.Errorf}
}
// Take reads nb bytes.
func (p *parser) Take(nb int) []byte {
// log.Printf("take %d", nb)
if len(p.buf) < nb {
p.Errorf("%w: not enough bytes", ErrStore)
}
buf := p.buf[:nb]
p.buf = p.buf[nb:]
return buf
}
// TakeBytes reads a uvarint representing the size of the bytes, followed by
// that number of bytes.
// dup is needed if you need to hold on to the bytes. Values from BoltDB are
// only valid during the transaction, are memory-mapped read-only, and must
// not be modified.
func (p *parser) TakeBytes(dup bool) []byte {
un := p.Uvarint()
n := p.checkInt(un)
buf := p.Take(n)
if dup {
// todo: check for a max size, beyond which we refuse to allocate?
nbuf := make([]byte, len(buf))
copy(nbuf, buf)
buf = nbuf
}
return buf
}
func (p *parser) Uvarint() uint64 {
v, n := binary.Uvarint(p.buf)
if n == 0 {
p.Errorf("%w: uvarint: not enough bytes", ErrStore)
}
if n < 0 {
p.Errorf("%w: uvarint overflow", ErrStore)
}
// log.Printf("take uvarint, %d bytes", n)
p.buf = p.buf[n:]
return v
}
func (p *parser) Varint() int64 {
v, n := binary.Varint(p.buf)
if n == 0 {
p.Errorf("%w: varint: not enough bytes", ErrStore)
}
if n < 0 {
p.Errorf("%w: varint overflow", ErrStore)
}
// log.Printf("take varint, %d bytes", n)
p.buf = p.buf[n:]
return v
}
type parseErr struct {
err error
}
// parse rv (reflect.Struct) from buf.
// does not parse the primary key field.
func (st storeType) parse(rv reflect.Value, buf []byte) (rerr error) {
p := &parser{buf: buf, orig: buf}
var version uint32
defer func() {
x := recover()
if x == nil {
return
}
perr, ok := x.(parseErr)
if ok {
rerr = fmt.Errorf("%w (version %d, buf %x, orig %x)", perr.err, version, p.buf, p.orig)
return
}
panic(x)
}()
version = uint32(p.Uvarint())
tv, ok := st.Versions[version]
if !ok {
return fmt.Errorf("%w: unknown type version %d", ErrStore, version)
}
tv.parse(p, rv)
if len(p.buf) != 0 {
return fmt.Errorf("%w: leftover data after parsing", ErrStore)
}
return nil
}
// parseNew parses bk and bv into a newly created value of type st.Type.
func (st storeType) parseNew(bk, bv []byte) (reflect.Value, error) {
rv := reflect.New(st.Type).Elem()
if err := st.parseFull(rv, bk, bv); err != nil {
return reflect.Value{}, err
}
return rv, nil
}
// parseFull parses a full record from bk and bv into value rv, which must be
// of type st.Type.
func (st storeType) parseFull(rv reflect.Value, bk, bv []byte) error {
if err := parsePK(rv.Field(0), bk); err != nil {
return err
}
err := st.parse(rv, bv)
if err != nil {
return err
}
return nil
}
func (tv typeVersion) parse(p *parser, rv reflect.Value) {
// First field is the primary key, stored as boltdb key only, not in
// the value.
fm := p.Fieldmap(len(tv.Fields) - 1)
for i, f := range tv.Fields[1:] {
if f.structField.Type == nil {
// Do not parse this field in the current Go type, but
// we must still skip over the bytes.
if fm.Nonzero(i) {
f.Type.skip(p)
}
continue
}
if fm.Nonzero(i) {
f.Type.parse(p, rv.FieldByIndex(f.structField.Index))
} else if f.Nonzero {
// Consistency check. Should not happen, we enforce nonzeroness.
p.Errorf("%w: unexpected nonzero value for %q", ErrStore, f.Name)
} else {
rv.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
}
}
}
// parse a nonzero fieldType.
func (ft fieldType) parse(p *parser, rv reflect.Value) {
// Because we allow schema changes from ptr to nonptr, rv can be a pointer or direct value regardless of ft.Ptr.
if rv.Kind() == reflect.Ptr {
nrv := reflect.New(rv.Type().Elem())
rv.Set(nrv)
rv = nrv.Elem()
}
switch ft.Kind {
case kindBytes:
rv.SetBytes(p.TakeBytes(true))
case kindBinaryMarshal:
buf := p.TakeBytes(false)
t := rv.Type()
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
v := reflect.New(t)
err := v.Interface().(encoding.BinaryUnmarshaler).UnmarshalBinary(buf)
if err != nil {
panic(parseErr{err})
}
if rv.Type().Kind() == reflect.Ptr {
rv.Set(v)
} else {
rv.Set(v.Elem())
}
case kindBool:
rv.SetBool(true)
case kindInt:
v := p.Varint()
if v < math.MinInt32 || v > math.MaxInt32 {
p.Errorf("%w: int %d does not fit in int32", ErrStore, v)
}
rv.SetInt(v)
case kindInt8, kindInt16, kindInt32, kindInt64:
rv.SetInt(p.Varint())
case kindUint:
v := p.Uvarint()
if v > math.MaxUint32 {
p.Errorf("%w: uint %d does not fit in uint32", ErrStore, v)
}
rv.SetUint(v)
case kindUint8, kindUint16, kindUint32, kindUint64:
rv.SetUint(p.Uvarint())
case kindFloat32:
rv.SetFloat(float64(math.Float32frombits(uint32(p.Uvarint()))))
case kindFloat64:
rv.SetFloat(math.Float64frombits(p.Uvarint()))
case kindString:
rv.SetString(string(p.TakeBytes(false)))
case kindTime:
err := rv.Addr().Interface().(*time.Time).UnmarshalBinary(p.TakeBytes(false))
if err != nil {
p.Errorf("%w: parsing time: %s", ErrStore, err)
}
case kindSlice:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
slc := reflect.MakeSlice(rv.Type(), n, n)
		for i := 0; i < n; i++ {
if fm.Nonzero(i) {
ft.List.parse(p, slc.Index(i))
}
}
rv.Set(slc)
case kindMap:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
mp := reflect.MakeMapWithSize(rv.Type(), n)
for i := 0; i < n; i++ {
mk := reflect.New(rv.Type().Key()).Elem()
ft.MapKey.parse(p, mk)
mv := reflect.New(rv.Type().Elem()).Elem()
if fm.Nonzero(i) {
ft.MapValue.parse(p, mv)
}
mp.SetMapIndex(mk, mv)
}
rv.Set(mp)
case kindStruct:
fm := p.Fieldmap(len(ft.Fields))
strct := reflect.New(rv.Type()).Elem()
for i, f := range ft.Fields {
if f.structField.Type == nil {
f.Type.skip(p)
continue
}
if fm.Nonzero(i) {
f.Type.parse(p, strct.FieldByIndex(f.structField.Index))
} else if f.Nonzero {
// Consistency check, we enforce that nonzero is not stored if not allowed.
p.Errorf("%w: %q", ErrZero, f.Name)
} else {
strct.FieldByIndex(f.structField.Index).Set(reflect.Zero(f.structField.Type))
}
}
rv.Set(strct)
default:
p.Errorf("internal error: unhandled field type") // should be prevented when registering type
}
}
// skip over the bytes for this fieldType. Needed when an older typeVersion has
// a field that the current reflect.Type does not (or cannot) have.
func (ft fieldType) skip(p *parser) {
switch ft.Kind {
case kindBytes, kindBinaryMarshal, kindString:
p.TakeBytes(false)
case kindBool:
case kindInt8, kindInt16, kindInt32, kindInt, kindInt64:
p.Varint()
case kindUint8, kindUint16, kindUint32, kindUint, kindUint64, kindFloat32, kindFloat64:
p.Uvarint()
case kindTime:
p.TakeBytes(false)
case kindSlice:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
for i := 0; i < n; i++ {
if fm.Nonzero(i) {
ft.List.skip(p)
}
}
case kindMap:
un := p.Uvarint()
n := p.checkInt(un)
fm := p.Fieldmap(n)
for i := 0; i < n; i++ {
ft.MapKey.skip(p)
if fm.Nonzero(i) {
ft.MapValue.skip(p)
}
}
case kindStruct:
fm := p.Fieldmap(len(ft.Fields))
for i, f := range ft.Fields {
if fm.Nonzero(i) {
f.Type.skip(p)
}
}
default:
p.Errorf("internal error: unhandled field type") // should be prevented when registering type
}
}

341
vendor/github.com/mjl-/bstore/plan.go generated vendored Normal file
View File

@ -0,0 +1,341 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
"sort"
)
// Plan represents a plan to execute a query, possibly using a simple/quick
// bucket "get" or cursor scan (forward/backward) on either the records or an
// index.
type plan[T any] struct {
// The index for this plan. If nil, we are using pk's, in which case
// "keys" below can be nil for a range scan with start/stop (possibly empty
// for full scan), or non-nil for looking up specific keys.
idx *index
// Use full unique index to get specific values from keys. idx above can be
// a unique index that we only use partially. In that case, this field is
// false.
unique bool
// If not nil, used to fetch explicit keys when using pk or unique
// index. Required non-nil for unique.
keys [][]byte
desc bool // Direction of the range scan.
start []byte // First key to scan. Filters below may still apply. If desc, this value is > than stop (if it is set). If nil, we begin ranging at the first or last (for desc) key.
stop []byte // Last key to scan. Can be nil independently of start.
	startInclusive bool // Whether the start and stop values are inclusive or exclusive.
stopInclusive bool
	// Filters we need to apply after retrieving the records. If all
// original filters from a query were handled by "keys" above, or by a
// range scan, this field is empty.
filters []filter[T]
// Orders we need to apply after first retrieving all records. As with
// filters, if a range scan takes care of an ordering from the query,
// this field is empty.
orders []order
}
// selectPlan selects the best plan for this query.
func (q *Query[T]) selectPlan() (*plan[T], error) {
// Simple case first: List of known IDs. We can just fetch them from
// the records bucket by their primary keys. This is common for a
// "Get" query.
if q.xfilterIDs != nil {
orders := q.xorders
keys := q.xfilterIDs.pks
// If there is an ordering on the PK field, we do the ordering here.
if len(orders) > 0 && orders[0].field.Name == q.st.Current.Fields[0].Name {
asc := orders[0].asc
sort.Slice(keys, func(i, j int) bool {
cmp := bytes.Compare(keys[i], keys[j])
return asc && cmp < 0 || !asc && cmp > 0
})
orders = orders[1:]
}
p := &plan[T]{
keys: keys,
filters: q.xfilters,
orders: orders,
}
return p, nil
}
// Try using a fully matched unique index. We build a map with all
// fields that have an equal or in filter. So we can easily look
// through our unique indices and get a match. We only look at a single
// filter per field. If there are multiple, we would use the last one.
// That's okay, we'll filter records out when we execute the leftover
// filters. Probably not common.
// This is common for filterEqual and filterIn on
// fields that have a unique index.
equalsIn := map[string]*filter[T]{}
for i := range q.xfilters {
ff := &q.xfilters[i]
switch f := (*ff).(type) {
case filterEqual[T]:
equalsIn[f.field.Name] = ff
case filterIn[T]:
equalsIn[f.field.Name] = ff
}
}
indices:
for _, idx := range q.st.Current.Indices {
// Direct fetches only for unique indices.
if !idx.Unique {
continue
}
for _, f := range idx.Fields {
if _, ok := equalsIn[f.Name]; !ok {
// At least one index field does not have a filter.
continue indices
}
}
// Calculate all keys that we need to retrieve from the index.
// todo optimization: if there is a sort involving these fields, we could do the sorting before fetching data.
// todo optimization: we can generate the keys on demand, will help when limit is in use: we are not generating all keys.
var keys [][]byte
		var skipFilters []*filter[T] // Filters to remove from the full list because they are handled by querying the index.
for i, f := range idx.Fields {
var rvalues []reflect.Value
ff := equalsIn[f.Name]
skipFilters = append(skipFilters, ff)
switch fi := (*ff).(type) {
case filterEqual[T]:
rvalues = []reflect.Value{fi.rvalue}
case filterIn[T]:
rvalues = fi.rvalues
default:
return nil, fmt.Errorf("internal error: bad filter %T", equalsIn[f.Name])
}
fekeys := make([][]byte, len(rvalues))
for j, fv := range rvalues {
key, _, err := packIndexKeys([]reflect.Value{fv}, nil)
if err != nil {
q.error(err)
return nil, err
}
fekeys[j] = key
}
if i == 0 {
keys = fekeys
continue
}
// Multiply current keys with the new values.
nkeys := make([][]byte, 0, len(keys)*len(fekeys))
for _, k := range keys {
for _, fk := range fekeys {
nk := append(append([]byte{}, k...), fk...)
nkeys = append(nkeys, nk)
}
}
keys = nkeys
}
p := &plan[T]{
idx: idx,
unique: true,
keys: keys,
filters: dropFilters(q.xfilters, skipFilters),
orders: q.xorders,
}
return p, nil
}
// Try all other indices. We treat them all as non-unique indices now.
// We want to use the one with as many "equal" prefix fields as
// possible. Then we hope to use a scan on the remaining, either
// because of a filterCompare, or for an ordering. If there is a limit,
// orderings are preferred over compares.
equals := map[string]*filter[T]{}
for i := range q.xfilters {
ff := &q.xfilters[i]
switch f := (*ff).(type) {
case filterEqual[T]:
equals[f.field.Name] = ff
}
}
// We are going to generate new plans, and keep the new one if it is better than what we have.
var p *plan[T]
var nequals int
var nrange int
var ordered bool
evaluatePKOrIndex := func(idx *index) error {
var isPK bool
var packKeys func([]reflect.Value) ([]byte, error)
if idx == nil {
// Make pretend index.
isPK = true
idx = &index{
Fields: []field{q.st.Current.Fields[0]},
}
packKeys = func(l []reflect.Value) ([]byte, error) {
return packPK(l[0])
}
} else {
packKeys = func(l []reflect.Value) ([]byte, error) {
key, _, err := packIndexKeys(l, nil)
return key, err
}
}
var neq = 0
// log.Printf("idx %v", idx)
var skipFilters []*filter[T]
for _, f := range idx.Fields {
if ff, ok := equals[f.Name]; ok {
skipFilters = append(skipFilters, ff)
neq++
} else {
break
}
}
// See if the next field can be used for compare.
var gx, lx *filterCompare[T]
var nrng int
var order *order
orders := q.xorders
if neq < len(idx.Fields) {
nf := idx.Fields[neq]
for i := range q.xfilters {
ff := &q.xfilters[i]
switch f := (*ff).(type) {
case filterCompare[T]:
if f.field.Name != nf.Name {
continue
}
switch f.op {
case opGreater, opGreaterEqual:
if gx == nil {
gx = &f
skipFilters = append(skipFilters, ff)
nrng++
}
case opLess, opLessEqual:
if lx == nil {
lx = &f
skipFilters = append(skipFilters, ff)
nrng++
}
}
}
}
// See if it can be used for ordering.
// todo optimization: we could use multiple orders
if len(orders) > 0 && orders[0].field.Name == nf.Name {
order = &orders[0]
orders = orders[1:]
}
}
// See if this is better than what we had.
if !(neq > nequals || (neq == nequals && (nrng > nrange || order != nil && !ordered && (q.xlimit > 0 || nrng == nrange)))) {
// log.Printf("plan not better, neq %d, nrng %d, limit %d, order %v ordered %v", neq, nrng, q.limit, order, ordered)
return nil
}
nequals = neq
nrange = nrng
ordered = order != nil
// Calculate the prefix key.
var kvalues []reflect.Value
for i := 0; i < neq; i++ {
f := idx.Fields[i]
kvalues = append(kvalues, (*equals[f.Name]).(filterEqual[T]).rvalue)
}
var key []byte
var err error
if neq > 0 {
key, err = packKeys(kvalues)
if err != nil {
return err
}
}
start := key
stop := key
if gx != nil {
k, err := packKeys([]reflect.Value{gx.value})
if err != nil {
return err
}
start = append(append([]byte{}, start...), k...)
}
if lx != nil {
k, err := packKeys([]reflect.Value{lx.value})
if err != nil {
return err
}
stop = append(append([]byte{}, stop...), k...)
}
startInclusive := gx == nil || gx.op != opGreater
stopInclusive := lx == nil || lx.op != opLess
if order != nil && !order.asc {
start, stop = stop, start
startInclusive, stopInclusive = stopInclusive, startInclusive
}
if isPK {
idx = nil // Clear our fake index for PK.
}
p = &plan[T]{
idx: idx,
desc: order != nil && !order.asc,
start: start,
stop: stop,
startInclusive: startInclusive,
stopInclusive: stopInclusive,
filters: dropFilters(q.xfilters, skipFilters),
orders: orders,
}
return nil
}
if err := evaluatePKOrIndex(nil); err != nil {
q.error(err)
return nil, q.err
}
for _, idx := range q.st.Current.Indices {
if err := evaluatePKOrIndex(idx); err != nil {
q.error(err)
return nil, q.err
}
}
if p != nil {
return p, nil
}
// We'll just do a scan over all data.
p = &plan[T]{
filters: q.xfilters,
orders: q.xorders,
}
return p, nil
}
func dropFilters[T any](filters []T, skip []*T) []T {
n := make([]T, 0, len(filters)-len(skip))
next:
for i := range filters {
f := &filters[i]
for _, s := range skip {
if f == s {
continue next
}
}
n = append(n, *f)
}
return n
}
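// dropFiltersSketch is an illustrative sketch, not part of the original code:
// dropFilters matches by pointer identity, so skip must hold pointers into
// the original slice, not pointers to copies.
func dropFiltersSketch() []int {
	l := []int{1, 2, 3}
	return dropFilters(l, []*int{&l[1]}) // []int{1, 3}
}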

1130
vendor/github.com/mjl-/bstore/query.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1215
vendor/github.com/mjl-/bstore/register.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

105
vendor/github.com/mjl-/bstore/stats.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package bstore
// StatsKV represent operations on the underlying BoltDB key/value store.
type StatsKV struct {
Get uint
Put uint // For Stats.Bucket, this counts calls of CreateBucket.
Delete uint
Cursor uint // Any cursor operation: Seek/First/Last/Next/Prev.
}
// Stats tracks DB/Tx/Query statistics, mostly counters.
type Stats struct {
// Number of read-only or writable transactions. Set for DB only.
Reads uint
Writes uint
Bucket StatsKV // Use of buckets.
Records StatsKV // Use of records bucket for types.
Index StatsKV // Use of index buckets for types.
// Operations that modify the database. Each record is counted, e.g.
// for a query that updates/deletes multiple records.
Get uint
Insert uint
Update uint
Delete uint
Queries uint // Total queries executed.
PlanTableScan uint // Full table scans.
PlanPK uint // Primary key get.
PlanUnique uint // Full key Unique index get.
PlanPKScan uint // Scan over primary keys.
PlanIndexScan uint // Scan over index.
Sort uint // In-memory collect and sort.
LastType string // Last type queried.
LastIndex string // Last index for LastType used for a query, or empty.
	LastOrdered bool // Whether the last scan (PK or index) was ordered, e.g. for sorting or because of a comparison filter.
LastAsc bool // If ordered, whether last index scan was ascending.
}
func (skv *StatsKV) add(n StatsKV) {
skv.Get += n.Get
skv.Put += n.Put
skv.Delete += n.Delete
skv.Cursor += n.Cursor
}
func (skv *StatsKV) sub(n StatsKV) {
skv.Get -= n.Get
skv.Put -= n.Put
skv.Delete -= n.Delete
skv.Cursor -= n.Cursor
}
func (st *Stats) add(n Stats) {
st.Reads += n.Reads
st.Writes += n.Writes
st.Bucket.add(n.Bucket)
st.Records.add(n.Records)
st.Index.add(n.Index)
st.Get += n.Get
st.Insert += n.Insert
st.Update += n.Update
st.Delete += n.Delete
st.Queries += n.Queries
st.PlanTableScan += n.PlanTableScan
st.PlanPK += n.PlanPK
st.PlanUnique += n.PlanUnique
st.PlanPKScan += n.PlanPKScan
st.PlanIndexScan += n.PlanIndexScan
st.Sort += n.Sort
st.LastType = n.LastType
st.LastIndex = n.LastIndex
st.LastOrdered = n.LastOrdered
st.LastAsc = n.LastAsc
}
// Sub returns st with the counters from o subtracted.
func (st Stats) Sub(o Stats) Stats {
st.Reads -= o.Reads
st.Writes -= o.Writes
st.Bucket.sub(o.Bucket)
st.Records.sub(o.Records)
st.Index.sub(o.Index)
st.Get -= o.Get
st.Insert -= o.Insert
st.Update -= o.Update
st.Delete -= o.Delete
st.Queries -= o.Queries
st.PlanTableScan -= o.PlanTableScan
st.PlanPK -= o.PlanPK
st.PlanUnique -= o.PlanUnique
st.PlanPKScan -= o.PlanPKScan
st.PlanIndexScan -= o.PlanIndexScan
st.Sort -= o.Sort
return st
}
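// statsDeltaSketch is an illustrative sketch, not part of the original code:
// capture Stats before and after a unit of work and subtract to get the
// counters attributable to just that work.
func statsDeltaSketch(db *DB, work func() error) (Stats, error) {
	before := db.Stats()
	err := work()
	return db.Stats().Sub(before), err
}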

566
vendor/github.com/mjl-/bstore/store.go generated vendored Normal file
View File

@ -0,0 +1,566 @@
package bstore
import (
"encoding"
"errors"
"fmt"
"io"
"io/fs"
"os"
"reflect"
"sync"
"time"
bolt "go.etcd.io/bbolt"
)
var (
ErrAbsent = errors.New("absent") // If a function can return an ErrAbsent, it can be compared directly, without errors.Is.
ErrZero = errors.New("must be nonzero")
ErrUnique = errors.New("not unique")
ErrReference = errors.New("referential inconsistency")
ErrMultiple = errors.New("multiple results")
ErrSeq = errors.New("highest autoincrement sequence value reached")
ErrType = errors.New("unknown/bad type")
ErrIncompatible = errors.New("incompatible types")
ErrFinished = errors.New("query finished")
ErrStore = errors.New("internal/storage error") // E.g. when buckets disappear, possibly by external users of the underlying BoltDB database.
ErrParam = errors.New("bad parameters")
errTxClosed = errors.New("transaction is closed")
errNestedIndex = errors.New("struct tags index/unique only allowed at top-level structs")
)
var sanityChecks bool // Only enabled during tests.
// DB is a database storing Go struct values in an underlying bolt database.
// DB is safe for concurrent use, unlike a Tx or a Query.
type DB struct {
bdb *bolt.DB
	// Read transactions take an rlock on types. Register can make changes and
// needs a wlock.
typesMutex sync.RWMutex
types map[reflect.Type]storeType
typeNames map[string]storeType // Go type name to store type, for checking duplicates.
statsMutex sync.Mutex
stats Stats
}
// Tx is a transaction on DB.
//
// A Tx is not safe for concurrent use.
type Tx struct {
db *DB // If nil, this transaction is closed.
btx *bolt.Tx
bucketCache map[bucketKey]*bolt.Bucket
stats Stats
}
// bucketKey represents a subbucket for a type.
type bucketKey struct {
typeName string
sub string // Empty for top-level type bucket, otherwise "records", "types" or starting with "index.".
}
type index struct {
Unique bool
Name string // Normally named after the field. But user can specify alternative name with "index" or "unique" struct tag with parameter.
Fields []field
tv *typeVersion
}
type storeType struct {
	Name string // Name of type as stored in database. Different from the current Go type name if the type uses the "typename" struct tag.
Type reflect.Type // Type we parse into for new values.
Current *typeVersion
// Earlier schema versions. Older type versions can still be stored. We
// prepare them for parsing into the reflect.Type. Some stored fields in
// old versions may be ignored: when a later schema has removed the field,
// that old stored field is considered deleted and will be ignored when
// parsing.
Versions map[uint32]*typeVersion
}
// note: when changing, possibly update func equal as well.
type typeVersion struct {
Version uint32 // First uvarint of a stored record references this version.
OndiskVersion uint32 // Version of on-disk format. Currently always 1.
Noauto bool // If true, the primary key is an int but opted out of autoincrement.
Fields []field // Fields that we store. Embed/anonymous fields are kept separately in embedFields, and are not stored.
Indices map[string]*index // By name of index.
ReferencedBy map[string]struct{} // Type names that reference this type. We require they are registered at the same time to maintain referential integrity.
name string
referencedBy []*index // Indexes (from other types) that reference this type.
references map[string]struct{} // Keys are the type names referenced. This is a summary for the references from Fields.
embedFields []embed // Embed/anonymous fields, their values are stored through Fields, we keep them for setting values.
fillPercent float64 // For "records" bucket. Set to 1 for append-only/mostly use as set with HintAppend, 0.5 otherwise.
}
// note: when changing, possibly update func equal as well.
// embed/anonymous fields are represented as type embed. The fields inside the embed type are of this type field.
type field struct {
Name string
Type fieldType
Nonzero bool
References []string // Referenced fields. Only for the top-level struct fields, not for nested structs.
Default string // As specified in struct tag. Processed version is defaultValue.
// If not the zero reflect.Value, set this value instead of a zero value on insert.
// This is always a non-pointer value. Only set for the current typeVersion
// linked to a Go type.
defaultValue reflect.Value
// Only set if this typeVersion will parse this field. We check
// structField.Type for non-nil before parsing this field. We don't parse it
// if this field is no longer in the type, or if it has been removed and
// added again in later schema versions.
structField reflect.StructField
indices map[string]*index
}
// embed is for embed/anonymous fields. the fields inside are represented as a type field.
type embed struct {
Name string
Type fieldType
structField reflect.StructField
}
type kind int
const (
kindInvalid kind = iota
kindBytes
kindBool
kindInt
kindInt8
kindInt16
kindInt32
kindInt64
kindUint
kindUint8
kindUint16
kindUint32
kindUint64
kindFloat32
kindFloat64
kindMap
kindSlice
kindString
kindTime
kindBinaryMarshal
kindStruct
)
var kindStrings = []string{
"(invalid)",
"bytes",
"bool",
"int",
"int8",
"int16",
"int32",
"int64",
"uint",
"uint8",
"uint16",
"uint32",
"uint64",
"float32",
"float64",
"map",
"slice",
"string",
"time",
"binarymarshal",
"struct",
}
func (k kind) String() string {
return kindStrings[k]
}
type fieldType struct {
Ptr bool // If type is a pointer.
	Kind kind // Type with a possible Ptr dereferenced.
Fields []field // For kindStruct.
MapKey, MapValue *fieldType // For kindMap.
List *fieldType // For kindSlice.
}
func (ft fieldType) String() string {
s := ft.Kind.String()
if ft.Ptr {
return s + "ptr"
}
return s
}
// Options configure how a database should be opened or initialized.
type Options struct {
Timeout time.Duration // Abort if opening DB takes longer than Timeout.
Perm fs.FileMode // Permissions for new file if created. If zero, 0600 is used.
MustExist bool // Before opening, check that file exists. If not, io/fs.ErrNotExist is returned.
}
// Open opens a bstore database and registers types by calling Register.
//
// If the file does not exist, a new database file is created, unless opts has
// MustExist set. Files are created with permission 0600, or with Perm from
// Options if nonzero.
//
// Only one DB instance can be open for a file at a time. Use opts.Timeout to
// specify a timeout during open to prevent indefinite blocking.
func Open(path string, opts *Options, typeValues ...any) (*DB, error) {
var bopts *bolt.Options
if opts != nil && opts.Timeout > 0 {
bopts = &bolt.Options{Timeout: opts.Timeout}
}
var mode fs.FileMode = 0600
if opts != nil && opts.Perm != 0 {
mode = opts.Perm
}
if opts != nil && opts.MustExist {
if _, err := os.Stat(path); err != nil {
return nil, err
}
}
bdb, err := bolt.Open(path, mode, bopts)
if err != nil {
return nil, err
}
typeNames := map[string]storeType{}
types := map[reflect.Type]storeType{}
db := &DB{bdb: bdb, typeNames: typeNames, types: types}
if err := db.Register(typeValues...); err != nil {
bdb.Close()
return nil, err
}
return db, nil
}
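// exampleOpen is an illustrative sketch, not part of the original code; the
// path and the User type are hypothetical. The first field is the primary
// key; a zero ID makes Insert assign the next autoincrement sequence.
func exampleOpen() error {
	type User struct {
		ID   int64
		Name string `bstore:"unique"`
	}
	db, err := Open("app.db", &Options{Timeout: 5 * time.Second}, User{})
	if err != nil {
		return err
	}
	defer db.Close()
	u := User{Name: "gopher"}
	return db.Insert(&u) // Sets u.ID from the bucket sequence.
}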
// Close closes the underlying database.
func (db *DB) Close() error {
return db.bdb.Close()
}
// Stats returns usage statistics for the lifetime of DB. Stats are tracked
// first in a Query or a Tx. Stats from a Query are propagated to its Tx when
// the Query finishes. Stats from a Tx are propagated to its DB when the
// transaction ends.
func (db *DB) Stats() Stats {
db.statsMutex.Lock()
defer db.statsMutex.Unlock()
return db.stats
}
// Stats returns usage statistics for this transaction.
// When a transaction is rolled back or committed, its statistics are copied
// into its DB.
func (tx *Tx) Stats() Stats {
return tx.stats
}
// WriteTo writes the entire database to w, not including changes made during this transaction.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
return tx.btx.WriteTo(w)
}
// return a bucket through cache.
func (tx *Tx) bucket(bk bucketKey) (*bolt.Bucket, error) {
if tx.bucketCache == nil {
tx.bucketCache = map[bucketKey]*bolt.Bucket{}
}
b := tx.bucketCache[bk]
if b != nil {
return b, nil
}
top := tx.bucketCache[bucketKey{bk.typeName, ""}]
if top == nil {
tx.stats.Bucket.Get++
top = tx.btx.Bucket([]byte(bk.typeName))
if top == nil {
return nil, fmt.Errorf("%w: missing bucket for type %q", ErrStore, bk.typeName)
}
tx.bucketCache[bucketKey{bk.typeName, ""}] = top
}
if bk.sub == "" {
return top, nil
}
tx.stats.Bucket.Get++
b = top.Bucket([]byte(bk.sub))
if b == nil {
return nil, fmt.Errorf("%w: missing bucket %q for type %q", ErrStore, bk.sub, bk.typeName)
}
tx.bucketCache[bk] = b
return b, nil
}
func (tx *Tx) typeBucket(typeName string) (*bolt.Bucket, error) {
return tx.bucket(bucketKey{typeName, ""})
}
func (tx *Tx) recordsBucket(typeName string, fillPercent float64) (*bolt.Bucket, error) {
b, err := tx.bucket(bucketKey{typeName, "records"})
if err != nil {
return nil, err
}
b.FillPercent = fillPercent
return b, nil
}
func (tx *Tx) indexBucket(idx *index) (*bolt.Bucket, error) {
return tx.bucket(bucketKey{idx.tv.name, "index." + idx.Name})
}
// Drop removes a type and its data from the database.
// If the type is currently registered, it is unregistered and no longer available.
// If a type is still referenced by another type, e.g. through a "ref" struct tag,
// ErrReference is returned.
// If the type does not exist, ErrAbsent is returned.
func (db *DB) Drop(name string) error {
return db.Write(func(tx *Tx) error {
tx.stats.Bucket.Get++
if tx.btx.Bucket([]byte(name)) == nil {
return ErrAbsent
}
if st, ok := db.typeNames[name]; ok && len(st.Current.referencedBy) > 0 {
return fmt.Errorf("%w: type is still referenced", ErrReference)
} else if ok {
for ref := range st.Current.references {
var n []*index
for _, idx := range db.typeNames[ref].Current.referencedBy {
if idx.tv != st.Current {
n = append(n, idx)
}
}
db.typeNames[ref].Current.referencedBy = n
}
delete(db.typeNames, name)
delete(db.types, st.Type)
}
tx.stats.Bucket.Delete++
return tx.btx.DeleteBucket([]byte(name))
})
}
// Delete calls Delete on a new writable Tx.
func (db *DB) Delete(values ...any) error {
return db.Write(func(tx *Tx) error {
return tx.Delete(values...)
})
}
// Get calls Get on a new read-only Tx.
func (db *DB) Get(values ...any) error {
return db.Read(func(tx *Tx) error {
return tx.Get(values...)
})
}
// Insert calls Insert on a new writable Tx.
func (db *DB) Insert(values ...any) error {
return db.Write(func(tx *Tx) error {
return tx.Insert(values...)
})
}
// Update calls Update on a new writable Tx.
func (db *DB) Update(values ...any) error {
return db.Write(func(tx *Tx) error {
return tx.Update(values...)
})
}
var typeKinds = map[reflect.Kind]kind{
reflect.Bool: kindBool,
reflect.Int: kindInt,
reflect.Int8: kindInt8,
reflect.Int16: kindInt16,
reflect.Int32: kindInt32,
reflect.Int64: kindInt64,
reflect.Uint: kindUint,
reflect.Uint8: kindUint8,
reflect.Uint16: kindUint16,
reflect.Uint32: kindUint32,
reflect.Uint64: kindUint64,
reflect.Float32: kindFloat32,
reflect.Float64: kindFloat64,
reflect.Map: kindMap,
reflect.Slice: kindSlice,
reflect.String: kindString,
}
func typeKind(t reflect.Type) (kind, error) {
if t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
return kindBytes, nil
}
k, ok := typeKinds[t.Kind()]
if ok {
return k, nil
}
if t == reflect.TypeOf(zerotime) {
return kindTime, nil
}
if reflect.PointerTo(t).AssignableTo(reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()) {
return kindBinaryMarshal, nil
}
if t.Kind() == reflect.Struct {
return kindStruct, nil
}
return kind(0), fmt.Errorf("%w: unsupported type %v", ErrType, t)
}
func typeName(t reflect.Type) (string, error) {
tags, err := newStoreTags(t.Field(0).Tag.Get("bstore"), true)
if err != nil {
return "", err
}
if name, err := tags.Get("typename"); err != nil {
return "", err
} else if name != "" {
return name, nil
}
return t.Name(), nil
}
// keyValue returns the packed primary key for value rv. For an insert, a next
// sequence may be generated for the primary key.
func (tv typeVersion) keyValue(tx *Tx, rv reflect.Value, insert bool, rb *bolt.Bucket) ([]byte, reflect.Value, bool, error) {
f := tv.Fields[0]
krv := rv.FieldByIndex(f.structField.Index)
var seq bool
if krv.IsZero() {
if !insert {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key can not be zero value", ErrParam)
}
if tv.Noauto {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: primary key cannot be zero value without autoincrement", ErrParam)
}
id, err := rb.NextSequence()
if err != nil {
return nil, reflect.Value{}, seq, fmt.Errorf("next primary key: %w", err)
}
switch f.Type.Kind {
case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
if krv.OverflowInt(int64(id)) {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
}
krv.SetInt(int64(id))
case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
if krv.OverflowUint(id) {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: next primary key sequence does not fit in type", ErrSeq)
}
krv.SetUint(id)
default:
// todo: should check this during register.
return nil, reflect.Value{}, seq, fmt.Errorf("%w: unsupported autoincrement primary key type %v", ErrZero, f.Type.Kind)
}
seq = true
} else if !tv.Noauto && insert {
		// We let the user insert their own ID even for our
		// autoincrement PK. But we update the internal next sequence
		// if the user's PK is the highest yet, so a future
		// autoincrement insert will succeed.
switch f.Type.Kind {
case kindInt, kindInt8, kindInt16, kindInt32, kindInt64:
v := krv.Int()
if v > 0 && uint64(v) > rb.Sequence() {
if err := rb.SetSequence(uint64(v)); err != nil {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
}
}
case kindUint, kindUint8, kindUint16, kindUint32, kindUint64:
v := krv.Uint()
if v > rb.Sequence() {
if err := rb.SetSequence(v); err != nil {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: updating sequence: %s", ErrStore, err)
}
}
}
}
k, err := packPK(krv)
if err != nil {
return nil, reflect.Value{}, seq, err
}
if seq {
tx.stats.Records.Get++
if rb.Get(k) != nil {
return nil, reflect.Value{}, seq, fmt.Errorf("%w: internal error: next sequence value is already present", ErrUnique)
}
}
return k, krv, seq, err
}
// Read calls function fn with a new read-only transaction, ensuring transaction rollback.
func (db *DB) Read(fn func(*Tx) error) error {
db.typesMutex.RLock()
defer db.typesMutex.RUnlock()
return db.bdb.View(func(btx *bolt.Tx) error {
tx := &Tx{db: db, btx: btx}
tx.stats.Reads++
defer tx.addStats()
return fn(tx)
})
}
// Write calls function fn with a new read-write transaction. If fn returns
// nil, the transaction is committed. Otherwise the transaction is rolled back.
func (db *DB) Write(fn func(*Tx) error) error {
db.typesMutex.RLock()
defer db.typesMutex.RUnlock()
return db.bdb.Update(func(btx *bolt.Tx) error {
tx := &Tx{db: db, btx: btx}
tx.stats.Writes++
defer tx.addStats()
return fn(tx)
})
}
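// writeSketch is an illustrative sketch, not part of the original code: fn
// returning nil commits the transaction, any error rolls it back.
func writeSketch(db *DB, v any) error {
	return db.Write(func(tx *Tx) error {
		if err := tx.Insert(v); err != nil {
			return err // Write rolls back.
		}
		return nil // Write commits.
	})
}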
// lookup storeType based on name of rt.
func (db *DB) storeType(rt reflect.Type) (storeType, error) {
st, ok := db.types[rt]
if !ok {
return storeType{}, fmt.Errorf("%w: %v", ErrType, rt)
}
return st, nil
}
// HintAppend sets a hint whether changes to the types indicated by each struct
// from values are (mostly) append-only.
//
// This currently sets the BoltDB bucket FillPercent to 1 for efficient use
// of storage space.
func (db *DB) HintAppend(append bool, values ...any) error {
db.typesMutex.Lock()
defer db.typesMutex.Unlock()
for _, v := range values {
t := reflect.TypeOf(v)
st, err := db.storeType(t)
if err != nil {
return err
}
if append {
st.Current.fillPercent = 1.0
} else {
st.Current.fillPercent = 0.5
}
}
return nil
}
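// Example sketch: hinting that a hypothetical append-mostly LogEntry type
// should fill BoltDB pages completely.
//
//	if err := db.HintAppend(true, LogEntry{}); err != nil {
//		return err // e.g. LogEntry was not registered
//	}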

69
vendor/github.com/mjl-/bstore/tags.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
package bstore
import (
"fmt"
"strings"
)
type storeTags []string
func newStoreTags(tag string, isPK bool) (storeTags, error) {
if tag == "" {
return nil, nil
}
l := strings.Split(tag, ",")
for _, s := range l {
w := strings.SplitN(s, " ", 2)
switch w[0] {
case "noauto", "typename":
if !isPK {
return nil, fmt.Errorf("%w: cannot have tag %q for non-primary key", ErrType, w[0])
}
case "index", "unique", "default", "-":
if isPK {
return nil, fmt.Errorf("%w: cannot have tag %q on primary key", ErrType, w[0])
}
case "name", "nonzero", "ref":
default:
return nil, fmt.Errorf("%w: unknown store tag %q", ErrType, w[0])
}
}
return storeTags(l), nil
}
func (t storeTags) Has(word string) bool {
for _, s := range t {
if s == word {
return true
}
}
return false
}
func (t storeTags) Get(word string) (string, error) {
wordsp := word + " "
for _, s := range t {
if strings.HasPrefix(s, wordsp) {
r := s[len(wordsp):]
if r == "" {
return "", fmt.Errorf("%w: bstore word %q requires non-empty parameter", ErrType, word)
}
return r, nil
} else if s == word {
return "", fmt.Errorf("%w: bstore word %q requires argument", ErrType, word)
}
}
return "", nil
}
func (t storeTags) List(word string) []string {
var l []string
wordsp := word + " "
for _, s := range t {
if strings.HasPrefix(s, wordsp) {
l = append(l, s[len(wordsp):])
}
}
return l
}
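// Example sketch of tags as parsed by newStoreTags, on a hypothetical type
// (assuming the "bstore" struct-tag key). "noauto" and "typename" are only
// valid on the primary key (the first field); "index", "unique", "default"
// and "-" only on other fields.
//
//	type Msg struct {
//		ID        int64  // Primary key, autoincrement when zero on insert.
//		MailboxID int64  `bstore:"nonzero,ref Mailbox"` // Must reference an existing Mailbox.
//		UID       uint32 `bstore:"unique"`
//		Received  string `bstore:"index"`
//		State     string `bstore:"default received"`
//	}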

438
vendor/github.com/mjl-/bstore/tx.go generated vendored Normal file
View File

@ -0,0 +1,438 @@
package bstore
import (
"bytes"
"fmt"
"reflect"
bolt "go.etcd.io/bbolt"
)
func (tx *Tx) structptr(value any) (reflect.Value, error) {
rv := reflect.ValueOf(value)
if !rv.IsValid() || rv.Kind() != reflect.Ptr || !rv.Elem().IsValid() || rv.Type().Elem().Kind() != reflect.Struct {
return reflect.Value{}, fmt.Errorf("%w: value must be non-nil pointer to a struct, is %T", ErrParam, value)
}
rv = rv.Elem()
return rv, nil
}
func (tx *Tx) structOrStructptr(value any) (reflect.Value, error) {
rv := reflect.ValueOf(value)
if !rv.IsValid() {
return reflect.Value{}, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam)
}
if rv.Kind() == reflect.Ptr {
rv = rv.Elem()
if !rv.IsValid() {
return rv, fmt.Errorf("%w: value must be non-nil if pointer", ErrParam)
}
}
if rv.Kind() != reflect.Struct {
return reflect.Value{}, fmt.Errorf("%w: value must be a struct or pointer to a struct, is %T", ErrParam, value)
}
return rv, nil
}
// updateIndices updates the indices by comparing the indexed fields of ov
// (old) and v (new). An index is only updated when its fields changed. Either
// ov or v may be the zero reflect.Value, indicating there is no old/new value;
// the index entries are then only removed or only added.
func (tx *Tx) updateIndices(tv *typeVersion, pk []byte, ov, v reflect.Value) error {
changed := func(idx *index) bool {
for _, f := range idx.Fields {
rofv := ov.FieldByIndex(f.structField.Index)
nofv := v.FieldByIndex(f.structField.Index)
// note: checking the interface values is enough, we only allow comparable types as index fields.
if rofv.Interface() != nofv.Interface() {
return true
}
}
return false
}
for _, idx := range tv.Indices {
var add, remove bool
if !ov.IsValid() {
add = true
} else if !v.IsValid() {
remove = true
} else if !changed(idx) {
continue
} else {
add, remove = true, true
}
ib, err := tx.indexBucket(idx)
if err != nil {
return err
}
if remove {
_, ik, err := idx.packKey(ov, pk)
if err != nil {
return err
}
tx.stats.Index.Delete++
if sanityChecks {
tx.stats.Index.Get++
if ib.Get(ik) == nil {
return fmt.Errorf("internal error: key missing from index")
}
}
if err := ib.Delete(ik); err != nil {
return fmt.Errorf("%w: removing from index: %s", ErrStore, err)
}
}
if add {
prek, ik, err := idx.packKey(v, pk)
if err != nil {
return err
}
if idx.Unique {
tx.stats.Index.Cursor++
if xk, _ := ib.Cursor().Seek(prek); xk != nil && bytes.HasPrefix(xk, prek) {
return fmt.Errorf("%w: %q", ErrUnique, idx.Name)
}
}
tx.stats.Index.Put++
if err := ib.Put(ik, []byte{}); err != nil {
return fmt.Errorf("inserting into index: %w", err)
}
}
}
return nil
}
func (tx *Tx) checkReferences(tv *typeVersion, pk []byte, ov, rv reflect.Value) error {
for _, f := range tv.Fields {
if len(f.References) == 0 {
continue
}
frv := rv.FieldByIndex(f.structField.Index)
if frv.IsZero() || (ov.IsValid() && ov.FieldByIndex(f.structField.Index).Interface() == frv.Interface()) {
continue
}
k, err := packPK(frv)
if err != nil {
return err
}
for _, name := range f.References {
rb, err := tx.recordsBucket(name, tv.fillPercent)
if err != nil {
return err
}
if rb.Get(k) == nil {
return fmt.Errorf("%w: value %v from field %q to %q", ErrReference, frv.Interface(), f.Name, name)
}
}
}
return nil
}
func (tx *Tx) addStats() {
tx.db.statsMutex.Lock()
tx.db.stats.add(tx.stats)
tx.db.statsMutex.Unlock()
tx.stats = Stats{}
}
// Get fetches records by their primary key from the database. Each value must
// be a pointer to a struct.
//
// ErrAbsent is returned if the record does not exist.
func (tx *Tx) Get(values ...any) error {
if tx.db == nil {
return errTxClosed
}
for _, value := range values {
tx.stats.Get++
rv, err := tx.structptr(value)
if err != nil {
return err
}
st, err := tx.db.storeType(rv.Type())
if err != nil {
return err
}
rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
if err != nil {
return err
}
k, _, _, err := st.Current.keyValue(tx, rv, false, rb)
if err != nil {
return err
}
tx.stats.Records.Get++
bv := rb.Get(k)
if bv == nil {
return ErrAbsent
}
if err := st.parse(rv, bv); err != nil {
return err
}
}
return nil
}
// Delete removes values by their primary key from the database. Each value
// must be a struct or pointer to a struct. Indices are automatically updated
// and referential integrity is maintained.
//
// ErrAbsent is returned if the record does not exist.
// ErrReference is returned if another record still references this record.
func (tx *Tx) Delete(values ...any) error {
if tx.db == nil {
return errTxClosed
}
for _, value := range values {
tx.stats.Delete++
rv, err := tx.structOrStructptr(value)
if err != nil {
return err
}
st, err := tx.db.storeType(rv.Type())
if err != nil {
return err
}
rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
if err != nil {
return err
}
k, _, _, err := st.Current.keyValue(tx, rv, false, rb)
if err != nil {
return err
}
tx.stats.Records.Get++
bv := rb.Get(k)
if bv == nil {
return ErrAbsent
}
rov, err := st.parseNew(k, bv)
if err != nil {
return fmt.Errorf("parsing current value: %w", err)
}
if err := tx.delete(rb, st, k, rov); err != nil {
return err
}
}
return nil
}
func (tx *Tx) delete(rb *bolt.Bucket, st storeType, k []byte, rov reflect.Value) error {
// Check that anyone referencing this type does not reference this record.
for _, refBy := range st.Current.referencedBy {
if ib, err := tx.indexBucket(refBy); err != nil {
return err
} else {
tx.stats.Index.Cursor++
if xk, _ := ib.Cursor().Seek(k); xk != nil && bytes.HasPrefix(xk, k) {
return fmt.Errorf("%w: index %q", ErrReference, refBy.Name)
}
}
}
// Delete value from indices.
if err := tx.updateIndices(st.Current, k, rov, reflect.Value{}); err != nil {
return fmt.Errorf("removing from indices: %w", err)
}
tx.stats.Records.Delete++
return rb.Delete(k)
}
// Update updates records represented by values by their primary keys into the
// database. Each value must be a pointer to a struct. Indices are
// automatically updated.
//
// ErrAbsent is returned if the record does not exist.
func (tx *Tx) Update(values ...any) error {
if tx.db == nil {
return errTxClosed
}
for _, value := range values {
tx.stats.Update++
rv, err := tx.structptr(value)
if err != nil {
return err
}
st, err := tx.db.storeType(rv.Type())
if err != nil {
return err
}
if err := tx.put(st, rv, false); err != nil {
return err
}
}
return nil
}
// Insert inserts values as new records into the database. Each value must be a
// pointer to a struct. If the primary key field is zero and autoincrement is not
// disabled, the next sequence is assigned. Indices are automatically updated.
//
// ErrUnique is returned if the record already exists.
// ErrSeq is returned if no next autoincrement integer is available.
// ErrZero is returned if a nonzero constraint would be violated.
// ErrReference is returned if another record is referenced that does not exist.
func (tx *Tx) Insert(values ...any) error {
if tx.db == nil {
return errTxClosed
}
for _, value := range values {
tx.stats.Insert++
rv, err := tx.structptr(value)
if err != nil {
return err
}
st, err := tx.db.storeType(rv.Type())
if err != nil {
return err
}
if err := st.Current.applyDefault(rv); err != nil {
return err
}
if err := tx.put(st, rv, true); err != nil {
return err
}
}
return nil
}
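// Example sketch: inserting a value with a zero primary key; a next
// autoincrement sequence is assigned and written back to the struct. User is
// hypothetical.
//
//	u := User{Email: "mjl@example.org"} // ID is 0.
//	err := db.Write(func(tx *bstore.Tx) error {
//		return tx.Insert(&u)
//	})
//	// On success, u.ID holds the newly assigned primary key.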
func (tx *Tx) put(st storeType, rv reflect.Value, insert bool) error {
rb, err := tx.recordsBucket(st.Current.name, st.Current.fillPercent)
if err != nil {
return err
}
k, krv, seq, err := st.Current.keyValue(tx, rv, insert, rb)
if err != nil {
return err
}
if insert {
tx.stats.Records.Get++
bv := rb.Get(k)
if bv != nil {
return fmt.Errorf("%w: record already exists", ErrUnique)
}
err := tx.insert(rb, st, rv, krv, k)
if err != nil && seq {
// Zero out the generated sequence.
krv.Set(reflect.Zero(krv.Type()))
}
return err
} else {
tx.stats.Records.Get++
bv := rb.Get(k)
if bv == nil {
return ErrAbsent
}
ov, err := st.parseNew(k, bv)
if err != nil {
return fmt.Errorf("parsing current value: %w", err)
}
return tx.update(rb, st, rv, ov, k)
}
}
func (tx *Tx) insert(rb *bolt.Bucket, st storeType, rv, krv reflect.Value, k []byte) error {
v, err := st.pack(rv)
if err != nil {
return err
}
if err := tx.checkReferences(st.Current, k, reflect.Value{}, rv); err != nil {
return err
}
if err := tx.updateIndices(st.Current, k, reflect.Value{}, rv); err != nil {
return fmt.Errorf("updating indices for inserted value: %w", err)
}
tx.stats.Records.Put++
if err := rb.Put(k, v); err != nil {
return err
}
rv.Field(0).Set(krv)
return nil
}
func (tx *Tx) update(rb *bolt.Bucket, st storeType, rv, rov reflect.Value, k []byte) error {
if st.Current.equal(rov, rv) {
return nil
}
v, err := st.pack(rv)
if err != nil {
return err
}
if err := tx.checkReferences(st.Current, k, rov, rv); err != nil {
return err
}
if err := tx.updateIndices(st.Current, k, rov, rv); err != nil {
return fmt.Errorf("updating indices for updated record: %w", err)
}
tx.stats.Records.Put++
return rb.Put(k, v)
}
// Begin starts a transaction.
//
// If writable is true, the transaction allows modifications. Only one writable
// transaction can be active at a time on a DB. No read-only transactions can be
// active at the same time. Attempting to begin a read-only transaction from a
// writable transaction leads to deadlock.
//
// A writable Tx can be committed or rolled back. A read-only transaction must
// always be rolled back.
func (db *DB) Begin(writable bool) (*Tx, error) {
btx, err := db.bdb.Begin(writable)
if err != nil {
return nil, err
}
db.typesMutex.RLock()
tx := &Tx{db: db, btx: btx}
if writable {
tx.stats.Writes++
} else {
tx.stats.Reads++
}
return tx, nil
}
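// Example sketch: manual transaction management. Read and Write are usually
// more convenient since they handle rollback and commit. The value u is
// hypothetical.
//
//	tx, err := db.Begin(true)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback() // Returns an error after a successful Commit, which we ignore.
//	if err := tx.Insert(&u); err != nil {
//		return err
//	}
//	return tx.Commit()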
// Rollback aborts and cancels any changes made in this transaction.
// Statistics are added to its DB.
func (tx *Tx) Rollback() error {
if tx.db == nil {
return errTxClosed
}
tx.addStats()
tx.db.typesMutex.RUnlock()
err := tx.btx.Rollback()
tx.db = nil
return err
}
// Commit commits changes made in the transaction to the database.
// Statistics are added to its DB.
func (tx *Tx) Commit() error {
if tx.db == nil {
return errTxClosed
}
tx.addStats()
tx.db.typesMutex.RUnlock()
err := tx.btx.Commit()
if err != nil {
tx.btx.Rollback() // Nothing to do for error.
}
tx.db = nil
return err
}

2
vendor/github.com/mjl-/sconf/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
/cmd/sconfexample/sconfexample
/cover.*

7
vendor/github.com/mjl-/sconf/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2019 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

12
vendor/github.com/mjl-/sconf/Makefile generated vendored Normal file
View File

@ -0,0 +1,12 @@
build:
go build ./...
go vet ./...
GOARCH=386 go vet ./...
staticcheck ./...
fmt:
gofmt -w -s *.go cmd/*/*.go
test:
go test -shuffle=on -coverprofile cover.out
go tool cover -html=cover.out -o cover.html

6
vendor/github.com/mjl-/sconf/README.txt generated vendored Normal file
View File

@ -0,0 +1,6 @@
sconf - simple config files
See https://godoc.org/github.com/mjl-/sconf for documentation.
# todo
- deal better with unexpected types. need to use canset?

264
vendor/github.com/mjl-/sconf/describe.go generated vendored Normal file
View File

@ -0,0 +1,264 @@
package sconf
import (
"bufio"
"errors"
"fmt"
"reflect"
"sort"
"strings"
"github.com/mjl-/xfmt"
)
var errNoElem = errors.New("no elements")
type writeError struct{ error }
type writer struct {
out *bufio.Writer
prefix string
keepZero bool // If set, we also write zero values.
docs bool // If set, we write comments.
}
func (w *writer) error(err error) {
panic(writeError{err})
}
func (w *writer) check(err error) {
if err != nil {
w.error(err)
}
}
func (w *writer) write(s string) {
_, err := w.out.WriteString(s)
w.check(err)
}
func (w *writer) flush() {
err := w.out.Flush()
w.check(err)
}
func (w *writer) indent() {
w.prefix += "\t"
}
func (w *writer) unindent() {
w.prefix = w.prefix[:len(w.prefix)-1]
}
func isOptional(sconfTag string) bool {
return hasTagWord(sconfTag, "optional")
}
func isIgnore(sconfTag string) bool {
return hasTagWord(sconfTag, "-") || hasTagWord(sconfTag, "ignore")
}
func hasTagWord(sconfTag, word string) bool {
l := strings.Split(sconfTag, ",")
for _, s := range l {
if s == word {
return true
}
}
return false
}
func (w *writer) describeMap(v reflect.Value) {
t := v.Type()
if t.Key().Kind() != reflect.String {
w.error(fmt.Errorf("map key must be string"))
}
keys := v.MapKeys()
sort.Slice(keys, func(i, j int) bool {
return keys[i].String() < keys[j].String()
})
have := false
for _, k := range keys {
have = true
w.write(w.prefix)
w.write(k.String() + ":")
mv := v.MapIndex(k)
if !w.keepZero && mv.Kind() == reflect.Struct && isEmptyStruct(mv) {
w.write(" nil\n")
continue
}
w.describeValue(mv)
}
if have {
return
}
w.write(w.prefix)
w.write("x:")
w.describeValue(reflect.Zero(t.Elem()))
}
// isEmptyStruct reports whether v is a zero value of a struct type whose
// fields are all optional or ignored, so Write would output nothing for it.
func isEmptyStruct(v reflect.Value) bool {
if v.Kind() != reflect.Struct {
panic("not a struct")
}
t := v.Type()
n := t.NumField()
for i := 0; i < n; i++ {
ft := t.Field(i)
tag := ft.Tag.Get("sconf")
if isIgnore(tag) {
continue
}
if !isOptional(tag) {
return false
}
if !isZeroIgnored(v.Field(i)) {
return false
}
}
return true
}
// isZeroIgnored reports whether v is zero, not counting ignored fields.
func isZeroIgnored(v reflect.Value) bool {
switch v.Kind() {
case reflect.Slice, reflect.Map:
return v.Len() == 0
case reflect.Ptr:
return v.IsZero() || isZeroIgnored(v.Elem())
case reflect.Struct:
t := v.Type()
n := t.NumField()
for i := 0; i < n; i++ {
ft := t.Field(i)
tag := ft.Tag.Get("sconf")
if isIgnore(tag) {
continue
}
if !isZeroIgnored(v.Field(i)) {
return false
}
}
return true
default:
return v.IsZero()
}
}
func (w *writer) describeStruct(v reflect.Value) {
t := v.Type()
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
fv := v.Field(i)
if isIgnore(f.Tag.Get("sconf")) {
continue
}
if !w.keepZero && isOptional(f.Tag.Get("sconf")) && isZeroIgnored(fv) {
continue
}
if w.docs {
doc := f.Tag.Get("sconf-doc")
optional := isOptional(f.Tag.Get("sconf"))
if doc != "" || optional {
s := "\n" + w.prefix + "# " + doc
if optional {
opt := "(optional)"
if doc != "" {
opt = " " + opt
}
s += opt
}
s += "\n"
b := &strings.Builder{}
err := xfmt.Format(b, strings.NewReader(s), xfmt.Config{MaxWidth: 80})
w.check(err)
w.write(b.String())
}
}
w.write(w.prefix)
w.write(f.Name + ":")
w.describeValue(fv)
}
}
func (w *writer) describeValue(v reflect.Value) {
t := v.Type()
i := v.Interface()
if t == durationType {
w.write(fmt.Sprintf(" %s\n", i))
return
}
switch t.Kind() {
default:
w.error(fmt.Errorf("unsupported value %v", t.Kind()))
return
case reflect.Bool:
w.write(fmt.Sprintf(" %v\n", i))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
w.write(fmt.Sprintf(" %d\n", i))
case reflect.Float32, reflect.Float64:
w.write(fmt.Sprintf(" %f\n", i))
case reflect.String:
if strings.Contains(v.String(), "\n") {
w.error(fmt.Errorf("unsupported multiline string"))
}
w.write(fmt.Sprintf(" %s\n", i))
case reflect.Slice:
w.write("\n")
w.indent()
w.describeSlice(v)
w.unindent()
case reflect.Ptr:
var pv reflect.Value
if v.IsNil() {
pv = reflect.New(t.Elem()).Elem()
} else {
pv = v.Elem()
}
w.describeValue(pv)
case reflect.Struct:
w.write("\n")
w.indent()
w.describeStruct(v)
w.unindent()
case reflect.Map:
w.write("\n")
w.indent()
w.describeMap(v)
w.unindent()
}
}
func (w *writer) describeSlice(v reflect.Value) {
describeElem := func(vv reflect.Value) {
w.write(w.prefix)
w.write("-")
w.describeValue(vv)
}
n := v.Len()
if n == 0 {
if w.keepZero {
describeElem(reflect.New(v.Type().Elem()))
} else {
w.error(errNoElem)
}
}
for i := 0; i < n; i++ {
describeElem(v.Index(i))
}
}

106
vendor/github.com/mjl-/sconf/doc.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
/*
Package sconf parses simple configuration files and generates commented example config files.
Sconf is the name of this package and of the config file format. The file format
is inspired by JSON and YAML, but easier to write and use correctly.
Sconf goals:
- Make the application self-documenting about its configuration requirements.
- Require full configuration of an application via a config file, so mistakes
by the operator are caught.
- Make it easy to write a correct config file, no surprises.
Workflow for using this package:
- Write a Go struct with the config for your application.
- Simply parse a config into that struct with Parse() or ParseFile().
- Write out an example config file with all fields that need to be set with
Describe(), and associated comments that you configured in struct tags.
Features of sconf as file format:
- Types similar to JSON, mapping naturally to types in programming languages.
- Requires far fewer type-describing tokens: no "" around map keys or strings,
no [] for arrays or {} for maps (as JSON has). Sconf uses the Go types to
guide parsing of the config.
- Can have comments (JSON cannot).
- Is simple, does not allow all kinds of syntaxes you would not ever want to use.
- Uses indenting for nested structures (with the indent character).
An example config file:
# comment for stringKey (optional)
StringKey: value1
IntKey: 123
BoolKey: true
Struct:
# this is the A-field
A: 321
B: true
# (optional)
C: this is text
StringArray:
- blah
- blah
# nested structs work just as well
Nested:
-
A: 1
B: false
C: hoi
-
A: -1
B: true
C: hallo
The top-level is always a map, typically parsed into a Go struct. Maps start
with a key, followed by a colon, followed by a value. Basic values like
strings, ints, bools run to the end of the line. The leading space after a
colon or dash is removed. Other values like maps and lists start on a new line,
with an additional level of indenting. List values start with a dash. Empty
lines are allowed. Multiline strings are not possible. Strings do not have
escaped characters.
And the struct that generated this:
var config struct {
StringKey string `sconf-doc:"comment for stringKey" sconf:"optional"`
IntKey int64
BoolKey bool
Struct struct {
A int `sconf-doc:"this is the A-field"`
B bool
C string `sconf:"optional"`
}
StringArray []string
Nested []struct {
A int
B bool
C string
} `sconf-doc:"nested structs work just as well"`
}
See cmd/sconfexample/main.go for more details.
In practice, you will mostly have nested maps:
Database:
Host: localhost
DBName: myapp
User: myuser
Mail:
SMTP:
TLS: true
Host: mail.example.org
Sconf only parses config files. It does not deal with command-line flags or
environment variables. Flags and environment variables are too limiting in data
types. Environment variables especially are error-prone: applications typically
have default values they fall back to, so they will not notice typos or
unrecognized variables. Config files also have the nice property of being easy
to diff, copy around, and store in a VCS. In practice, command-line flags and
environment variables are commonly stored in config files anyway. Sconf goes
straight to the config files.
*/
package sconf

308
vendor/github.com/mjl-/sconf/parse.go generated vendored Normal file
View File

@ -0,0 +1,308 @@
package sconf
import (
"bufio"
"encoding/base64"
"errors"
"fmt"
"io"
"reflect"
"strconv"
"strings"
"time"
)
type parser struct {
prefix string // indented string
input *bufio.Reader // for reading lines at a time
line string // last read line
linenumber int
}
type parseError struct {
err error
}
func parse(path string, src io.Reader, dst interface{}) (err error) {
p := &parser{
input: bufio.NewReader(src),
}
defer func() {
x := recover()
if x == nil {
return
}
perr, ok := x.(parseError)
if ok {
err = fmt.Errorf("%s:%d: %v", path, p.linenumber, perr.err)
return
}
panic(x)
}()
v := reflect.ValueOf(dst)
if v.Kind() != reflect.Ptr {
p.stop("destination not a pointer")
}
p.parseStruct0(v.Elem())
return
}
func (p *parser) stop(err string) {
panic(parseError{errors.New(err)})
}
func (p *parser) check(err error, action string) {
if err != nil {
p.stop(fmt.Sprintf("%s: %s", action, err))
}
}
func (p *parser) string() string {
return p.line
}
func (p *parser) leave(s string) {
p.line = s
}
func (p *parser) consume() string {
s := p.line
p.line = ""
return s
}
// next returns whether the next line is properly indented, reading data as necessary.
func (p *parser) next() bool {
for p.line == "" {
s, err := p.input.ReadString('\n')
if s == "" {
if err == io.EOF {
return false
}
p.stop(err.Error())
}
p.linenumber++
if strings.HasPrefix(strings.TrimSpace(s), "#") {
continue
}
p.line = strings.TrimSuffix(s, "\n")
}
// Less indenting than expected. Let caller stop, returning to its caller for lower-level indent.
r := strings.HasPrefix(p.line, p.prefix)
return r
}
func (p *parser) indent() {
p.prefix += "\t"
if !p.next() {
p.stop("expected indent")
}
}
func (p *parser) unindent() {
p.prefix = p.prefix[1:]
}
var durationType = reflect.TypeOf(time.Duration(0))
func (p *parser) parseValue(v reflect.Value) reflect.Value {
t := v.Type()
if t == durationType {
s := p.consume()
d, err := time.ParseDuration(s)
p.check(err, "parsing duration")
v.Set(reflect.ValueOf(d))
return v
}
switch t.Kind() {
default:
p.stop(fmt.Sprintf("cannot parse type %v", t.Kind()))
case reflect.Bool:
s := p.consume()
switch s {
case "false":
v.SetBool(false)
case "true":
v.SetBool(true)
default:
p.stop(fmt.Sprintf("bad boolean value %q", s))
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := p.consume()
x, err := strconv.ParseInt(s, 10, 64)
p.check(err, "parsing integer")
v.SetInt(x)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
s := p.consume()
x, err := strconv.ParseUint(s, 10, 64)
p.check(err, "parsing integer")
v.SetUint(x)
case reflect.Float32, reflect.Float64:
s := p.consume()
x, err := strconv.ParseFloat(s, 64)
p.check(err, "parsing float")
v.SetFloat(x)
case reflect.String:
v.SetString(p.consume())
case reflect.Slice:
v = p.parseSlice(v)
case reflect.Ptr:
vv := reflect.New(t.Elem())
p.parseValue(vv.Elem())
v.Set(vv)
case reflect.Struct:
p.parseStruct(v)
case reflect.Map:
v = reflect.MakeMap(t)
p.parseMap(v)
}
return v
}
func (p *parser) parseSlice(v reflect.Value) reflect.Value {
if v.Type().Elem().Kind() == reflect.Uint8 {
s := p.consume()
buf, err := base64.StdEncoding.DecodeString(s)
p.check(err, "parsing base64")
v.SetBytes(buf)
return v
}
p.indent()
defer p.unindent()
return p.parseSlice0(v)
}
func (p *parser) parseSlice0(v reflect.Value) reflect.Value {
for p.next() {
s := p.string()
prefix := p.prefix + "-"
if !strings.HasPrefix(s, prefix) {
p.stop(fmt.Sprintf("expected item, prefix %q, saw %q", prefix, s))
}
s = s[len(prefix):]
if s != "" {
if !strings.HasPrefix(s, " ") {
p.stop("missing space after -")
}
s = s[1:]
}
p.leave(s)
vv := reflect.New(v.Type().Elem()).Elem()
vv = p.parseValue(vv)
v = reflect.Append(v, vv)
}
return v
}
func (p *parser) parseStruct(v reflect.Value) {
p.indent()
defer p.unindent()
p.parseStruct0(v)
}
func (p *parser) parseStruct0(v reflect.Value) {
seen := map[string]struct{}{}
var zeroValue reflect.Value
t := v.Type()
for p.next() {
s := p.string()
s = s[len(p.prefix):]
l := strings.SplitN(s, ":", 2)
if len(l) != 2 {
p.stop("missing key: value")
}
k := l[0]
if k == "" {
p.stop("empty key")
}
if _, ok := seen[k]; ok {
p.stop("duplicate key")
}
seen[k] = struct{}{}
s = l[1]
if s != "" && !strings.HasPrefix(s, " ") {
p.stop("no space after colon")
}
if s != "" {
s = s[1:]
}
p.leave(s)
vv := v.FieldByName(k)
if vv == zeroValue {
p.stop(fmt.Sprintf("unknown key %q", k))
}
if ft, _ := t.FieldByName(k); isIgnore(ft.Tag.Get("sconf")) {
p.stop(fmt.Sprintf("unknown key %q (has ignore tag)", k))
}
vv.Set(p.parseValue(vv))
}
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if isIgnore(f.Tag.Get("sconf")) || isOptional(f.Tag.Get("sconf")) {
continue
}
if _, ok := seen[f.Name]; !ok {
p.stop(fmt.Sprintf("missing required key %q", f.Name))
}
}
}
func (p *parser) parseMap(v reflect.Value) {
p.indent()
defer p.unindent()
p.parseMap0(v)
}
func (p *parser) parseMap0(v reflect.Value) {
seen := map[string]struct{}{}
t := v.Type()
for p.next() {
s := p.string()
s = s[len(p.prefix):]
l := strings.SplitN(s, ":", 2)
if len(l) != 2 {
p.stop("missing key: value")
}
k := l[0]
if k == "" {
p.stop("empty key")
}
if _, ok := seen[k]; ok {
p.stop("duplicate key")
}
seen[k] = struct{}{}
s = l[1]
if s != "" && !strings.HasPrefix(s, " ") {
p.stop("no space after colon")
}
if s != "" {
s = s[1:]
}
vv := reflect.New(t.Elem()).Elem()
if s == "nil" {
// Special value "nil" means the zero value, no further parsing of a value.
p.leave("")
} else {
p.leave(s)
vv = p.parseValue(vv)
}
v.SetMapIndex(reflect.ValueOf(k), vv)
}
}

71
vendor/github.com/mjl-/sconf/sconf.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
package sconf
import (
"bufio"
"fmt"
"io"
"os"
"reflect"
)
// ParseFile reads an sconf file from path into dst.
func ParseFile(path string, dst interface{}) error {
src, err := os.Open(path)
if err != nil {
return err
}
defer src.Close()
return parse(path, src, dst)
}
// Parse reads an sconf file from a reader into dst.
func Parse(src io.Reader, dst interface{}) error {
return parse("", src, dst)
}
// Describe writes an example sconf file describing v to w. The file includes all
// fields, values and documentation on the fields as configured with the "sconf"
// and "sconf-doc" struct tags. Describe does not detect recursive values and will
// attempt to write them.
func Describe(w io.Writer, v interface{}) error {
return describe(w, v, true, true)
}
// Write writes a valid sconf file describing v to w, without comments, without
// zero values of optional fields. Write does not detect recursive values and
// will attempt to write them.
func Write(w io.Writer, v interface{}) error {
return describe(w, v, false, false)
}
// WriteDocs is like Write, but does write comments.
func WriteDocs(w io.Writer, v interface{}) error {
return describe(w, v, false, true)
}
func describe(w io.Writer, v interface{}, keepZero bool, docs bool) (err error) {
value := reflect.ValueOf(v)
t := value.Type()
if t.Kind() == reflect.Ptr {
value = value.Elem()
t = value.Type()
}
if t.Kind() != reflect.Struct {
return fmt.Errorf("top level object must be a struct, is a %T", v)
}
defer func() {
x := recover()
if x == nil {
return
}
if e, ok := x.(writeError); ok {
err = error(e)
} else {
panic(x)
}
}()
wr := &writer{out: bufio.NewWriter(w), keepZero: keepZero, docs: docs}
wr.describeStruct(value)
wr.flush()
return nil
}
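// Example sketch: parsing a config file and printing a documented example
// config. The struct fields and the file name are hypothetical.
//
//	var cfg struct {
//		DataDir  string `sconf-doc:"Directory to store data in."`
//		LogLevel string `sconf:"optional" sconf-doc:"Error, info or debug."`
//	}
//	if err := sconf.ParseFile("app.conf", &cfg); err != nil {
//		log.Fatalf("parsing config: %v", err)
//	}
//	if err := sconf.Describe(os.Stdout, &cfg); err != nil {
//		log.Fatalf("describing config: %v", err)
//	}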

4
vendor/github.com/mjl-/sherpa/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
/cover.out
/cover.html
*\.swp

7
vendor/github.com/mjl-/sherpa/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2016-2018 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

27
vendor/github.com/mjl-/sherpa/LICENSE-go generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16
vendor/github.com/mjl-/sherpa/Makefile generated vendored Normal file
View File

@ -0,0 +1,16 @@
build:
go build ./...
go vet ./...
test:
go test -coverprofile=cover.out ./...
go tool cover -html=cover.out -o cover.html
golint ./...
coverage:
clean:
go clean ./...
fmt:
go fmt ./...

39
vendor/github.com/mjl-/sherpa/README.md generated vendored Normal file
View File

@ -0,0 +1,39 @@
# Sherpa
Sherpa is a Go library for creating a [sherpa API](https://www.ueber.net/who/mjl/sherpa/).
This library makes it trivial to export Go functions as a sherpa API with an http.Handler.
Your API will automatically be documented: github.com/mjl-/sherpadoc reads your Go source, and exports function and type comments as API documentation.
See the [documentation](https://godoc.org/github.com/mjl-/sherpa).
## Examples
A public sherpa API: https://www.sherpadoc.org/#https://www.sherpadoc.org/example/
That web application is [sherpaweb](https://github.com/mjl-/sherpaweb). It shows documentation for any sherpa API but also includes an API called Example for demo purposes.
[Ding](https://github.com/mjl-/ding/) is a more elaborate web application built with this library.
# About
Written by Mechiel Lukkien, mechiel@ueber.net.
Bug fixes, patches, comments are welcome.
MIT-licensed, see LICENSE.
# todo
- add a toggle for enabling calls by GET request. turn off by default for functions with parameters, people might be making requests with sensitive information in query strings...
- include a sherpaweb-like page that displays the documentation
- consider adding input & output validation and timestamp conversion to plain js lib
- consider using interfaces with functions (instead of direct structs) for server implementations. haven't needed it yet, but could be useful for mocking an api that you want to talk to.
- think about way to keep unknown fields. perhaps use a json lib that collects unknown keys in a map (which has to be added to the object for which you want to keep such keys).
- sherpajs: make a versionied, minified variant, with license line
- tool for comparing two jsons for compatibility, listing added sections/functions/types/fields
- be more helpful around errors that functions can generate. perhaps adding a mechanism for listing which errors can occur in the api json.
- handler: write tests
- client: write tests

19
vendor/github.com/mjl-/sherpa/codes.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
package sherpa
// Errors generated by both clients and servers
const (
SherpaBadFunction = "sherpa:badFunction" // Function does not exist at server.
)
// Errors generated by clients
const (
SherpaBadResponse = "sherpa:badResponse" // Bad response from server, e.g. JSON response body could not be parsed.
SherpaHTTPError = "sherpa:http" // Unexpected http response status code from server.
SherpaNoAPI = "sherpa:noAPI" // No API was found at this URL.
)
// Errors generated by servers
const (
SherpaBadRequest = "sherpa:badRequest" // Error parsing JSON request body.
SherpaBadParams = "sherpa:badParams" // Wrong number of parameters in function call.
)

21
vendor/github.com/mjl-/sherpa/collector.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package sherpa
// Collector facilitates collection of metrics. Functions are called by the library when the corresponding events or errors occur.
// See https://github.com/irias/sherpa-prometheus-collector for an implementation for prometheus.
type Collector interface {
ProtocolError() // Invalid request at protocol-level, e.g. wrong mimetype or request body.
BadFunction() // Function does not exist.
JavaScript() // Sherpa.js is requested.
JSON() // Sherpa.json is requested.
// Call of function, how long it took, and in case of failure, the error code.
FunctionCall(name string, durationSec float64, errorCode string)
}
type ignoreCollector struct{}
func (ignoreCollector) ProtocolError() {}
func (ignoreCollector) BadFunction() {}
func (ignoreCollector) JavaScript() {}
func (ignoreCollector) JSON() {}
func (ignoreCollector) FunctionCall(name string, durationSec float64, errorCode string) {}
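// Example sketch of a custom Collector that just logs calls; a hypothetical
// alternative to the prometheus collector mentioned above.
//
//	type logCollector struct{}
//
//	func (logCollector) ProtocolError() {}
//	func (logCollector) BadFunction()   {}
//	func (logCollector) JavaScript()    {}
//	func (logCollector) JSON()          {}
//	func (logCollector) FunctionCall(name string, durationSec float64, errorCode string) {
//		log.Printf("sherpa call %q: %.3fs, error code %q", name, durationSec, errorCode)
//	}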

8
vendor/github.com/mjl-/sherpa/doc.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Package sherpa exports your Go functions as fully documented sherpa web APIs.
//
// Sherpa is similar to JSON-RPC, but discoverable and self-documenting.
// Read more at https://www.ueber.net/who/mjl/sherpa/.
//
// Use sherpa.NewHandler to export Go functions using a http.Handler.
// An example of how to use NewHandler can be found in https://github.com/mjl-/sherpaweb/
package sherpa

653
vendor/github.com/mjl-/sherpa/handler.go generated vendored Normal file
View File

@ -0,0 +1,653 @@
package sherpa
import (
"bytes"
"context"
"encoding/json"
"fmt"
"html/template"
"io"
"log"
"mime"
"net/http"
"reflect"
"strings"
"time"
"unicode"
"github.com/mjl-/sherpadoc"
)
// SherpaVersion is the version of the Sherpa protocol this package implements. Sherpa is at version 1.
const SherpaVersion = 1
// JSON holds all fields for a request to sherpa.json.
type JSON struct {
ID string `json:"id"`
Title string `json:"title"`
Functions []string `json:"functions"`
BaseURL string `json:"baseurl"`
Version string `json:"version"`
SherpaVersion int `json:"sherpaVersion"`
SherpadocVersion int `json:"sherpadocVersion"`
}
// HandlerOpts are options for creating a new handler.
type HandlerOpts struct {
Collector Collector // Holds functions for collecting metrics about function calls and other incoming HTTP requests. May be nil.
LaxParameterParsing bool // If enabled, incoming sherpa function calls will ignore unrecognized fields in struct parameters, instead of failing.
AdjustFunctionNames string // If empty, only the first character of a function name is lowercased. For "lowerWord", the leading run of capitals is lowercased; for "none", the function name is left as is.
}
// Raw signals a raw JSON response.
// If a handler panics with this type, the raw bytes are sent (with regular
// response headers).
// Can be used to skip the JSON encoding in the handler, e.g. for caching, or
// when you read a properly formatted JSON document from a file or database.
// By using panic to signal a raw JSON response, the return types stay intact
// for sherpadoc to generate documentation from.
type Raw []byte
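// Example sketch: an exported function returning pre-encoded JSON from a
// hypothetical cache, skipping the JSON encoder while keeping the return type
// intact for sherpadoc. API and cachedConfigJSON are hypothetical.
//
//	func (API) Config(ctx context.Context) map[string]string {
//		panic(sherpa.Raw(cachedConfigJSON)) // cachedConfigJSON holds a valid JSON object.
//	}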
// handler that responds to all Sherpa-related requests.
type handler struct {
path string
functions map[string]reflect.Value
sherpaJSON *JSON
opts HandlerOpts
}
// Error returned by a function called through a sherpa API.
// Message is a human-readable error message.
// Code is optional, it can be used to handle errors programmatically.
type Error struct {
Code string `json:"code"`
Message string `json:"message"`
}
func (e *Error) Error() string {
return e.Message
}
// InternalServerError is an error that propagates as an HTTP internal server error (HTTP status 500), instead of returning a regular HTTP status 200 OK with the error message in the response body.
// Useful for making Sherpa endpoints that can be monitored by simple HTTP monitoring tools.
type InternalServerError struct {
Code string `json:"code"`
Message string `json:"message"`
}
func (e *InternalServerError) Error() string {
return e.Message
}
func (e *InternalServerError) error() *Error {
return &Error{"internalServerError", e.Message}
}
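// Example sketch: a hypothetical exported function failing with an Error
// carrying a code the client can check, or with an InternalServerError that
// results in HTTP status 500. The remove helper is hypothetical.
//
//	func (API) Remove(ctx context.Context, id int64) error {
//		if id == 0 {
//			return &sherpa.Error{Code: "user:notFound", Message: "no such id"}
//		}
//		if err := remove(id); err != nil {
//			return &sherpa.InternalServerError{Code: "server:remove", Message: "removing failed"}
//		}
//		return nil
//	}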
// Sherpa API response type
type response struct {
Result interface{} `json:"result"`
Error *Error `json:"error,omitempty"`
}
var htmlTemplate *template.Template
func init() {
var err error
htmlTemplate, err = template.New("html").Parse(`<!doctype html>
<html>
<head>
<meta charset="utf-8" />
<title>{{.title}}</title>
<style>
body { font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; line-height:1.4; font-size:16px; color: #333; }
a { color: #327CCB; }
.code { padding: 2px 4px; font-size: 90%; color: #c7254e; background-color: #f9f2f4; border-radius: 4px; }
</style>
</head>
<body>
<div style="margin:1em auto 1em; max-width:45em">
<h1>{{.title}} <span style="font-weight:normal; font-size:0.7em">- version {{.version}}</span></h1>
<p>
This is the base URL for {{.title}}. The API has been loaded on this page, under variable <span class="code">{{.id}}</span>. So open your browser's developer console and start calling functions!
</p>
<p>
You can also read the <a href="{{.docURL}}">documentation</a> for this API.
</p>
<p style="text-align: center; font-size:smaller; margin-top:8ex;">
<a href="https://github.com/mjl-/sherpa/">go sherpa code</a> |
<a href="https://www.ueber.net/who/mjl/sherpa/">sherpa api's</a> |
<a href="https://github.com/mjl-/sherpaweb/">sherpaweb code</a>
</p>
</div>
<script src="{{.jsURL}}"></script>
</body>
</html>`)
if err != nil {
panic(err)
}
}
func getBaseURL(r *http.Request) string {
host := r.Header.Get("X-Forwarded-Host")
if host == "" {
host = r.Host
}
scheme := r.Header.Get("X-Forwarded-Proto")
if scheme == "" {
scheme = "http"
}
return scheme + "://" + host
}
func respondJSON(w http.ResponseWriter, status int, v interface{}) {
respond(w, status, v, false, "")
}
func respond(w http.ResponseWriter, status int, v interface{}, jsonp bool, callback string) {
if jsonp {
w.Header().Add("Content-Type", "text/javascript; charset=utf-8")
} else {
w.Header().Add("Content-Type", "application/json; charset=utf-8")
}
w.WriteHeader(status)
var err error
if jsonp {
_, err = fmt.Fprintf(w, "%s(\n\t", callback)
}
if raw, ok := v.(Raw); err == nil && ok {
_, err = w.Write([]byte(`{"result":`))
if err == nil {
_, err = w.Write(raw)
}
if err == nil {
_, err = w.Write([]byte("}"))
}
} else if err == nil && !ok {
err = json.NewEncoder(w).Encode(v)
}
if err == nil && jsonp {
_, err = fmt.Fprint(w, ");")
}
if err != nil && !isConnectionClosed(err) {
log.Println("writing response:", err)
}
}
// Call function fn with a json body read from r.
// Ctx is from the http.Request, and is canceled when the http connection goes away.
//
// on success, the returned interface contains:
// - nil, if fn has no return value
// - single value, if fn had a single return value
// - slice of values, if fn had multiple return values
// - Raw, for a preformatted JSON response (caught from panic).
//
// on error, we always return an Error with the Code field set.
func (h *handler) call(ctx context.Context, functionName string, fn reflect.Value, r io.Reader) (ret interface{}, ee error) {
defer func() {
e := recover()
if e == nil {
return
}
se, ok := e.(*Error)
if ok {
ee = se
return
}
ierr, ok := e.(*InternalServerError)
if ok {
ee = ierr
return
}
if raw, ok := e.(Raw); ok {
ret = raw
return
}
panic(e)
}()
lcheck := func(err error, code, message string) {
if err != nil {
panic(&Error{Code: code, Message: fmt.Sprintf("function %q: %s: %s", functionName, message, err)})
}
}
var request struct {
Params json.RawMessage `json:"params"`
}
dec := json.NewDecoder(r)
dec.DisallowUnknownFields()
err := dec.Decode(&request)
lcheck(err, SherpaBadRequest, "invalid JSON request body")
fnt := fn.Type()
var params []interface{}
err = json.Unmarshal(request.Params, &params)
lcheck(err, SherpaBadRequest, "invalid JSON request body")
needArgs := fnt.NumIn()
needValues := needArgs
ctxType := reflect.TypeOf((*context.Context)(nil)).Elem()
needsContext := needValues > 0 && fnt.In(0).Implements(ctxType)
if needsContext {
needArgs--
}
if fnt.IsVariadic() {
if len(params) != needArgs-1 && len(params) != needArgs {
err = fmt.Errorf("got %d, want %d or %d", len(params), needArgs-1, needArgs)
}
} else {
if len(params) != needArgs {
err = fmt.Errorf("got %d, want %d", len(params), needArgs)
}
}
lcheck(err, SherpaBadParams, "bad number of parameters")
values := make([]reflect.Value, needValues)
o := 0
if needsContext {
values[0] = reflect.ValueOf(ctx)
o = 1
}
args := make([]interface{}, needArgs)
for i := range args {
n := reflect.New(fnt.In(o + i))
values[o+i] = n.Elem()
args[i] = n.Interface()
}
dec = json.NewDecoder(bytes.NewReader(request.Params))
if !h.opts.LaxParameterParsing {
dec.DisallowUnknownFields()
}
err = dec.Decode(&args)
lcheck(err, SherpaBadParams, "parsing parameters")
errorType := reflect.TypeOf((*error)(nil)).Elem()
checkError := fnt.NumOut() > 0 && fnt.Out(fnt.NumOut()-1).Implements(errorType)
var results []reflect.Value
if fnt.IsVariadic() {
results = fn.CallSlice(values)
} else {
results = fn.Call(values)
}
if len(results) == 0 {
return nil, nil
}
rr := make([]interface{}, len(results))
for i, v := range results {
rr[i] = v.Interface()
}
if !checkError {
if len(rr) == 1 {
return rr[0], nil
}
return rr, nil
}
rr, rerr := rr[:len(rr)-1], rr[len(rr)-1]
var rv interface{} = rr
switch len(rr) {
case 0:
rv = nil
case 1:
rv = rr[0]
}
if rerr == nil {
return rv, nil
}
switch r := rerr.(type) {
case *Error:
return nil, r
case *InternalServerError:
return nil, r
case error:
return nil, &Error{Message: r.Error()}
default:
panic("checkError while type is not error")
}
}
func adjustFunctionNameCapitals(s string, opts HandlerOpts) string {
switch opts.AdjustFunctionNames {
case "":
return strings.ToLower(s[:1]) + s[1:]
case "none":
return s
case "lowerWord":
r := ""
for i, c := range s {
lc := unicode.ToLower(c)
if lc == c {
r += s[i:]
break
}
r += string(lc)
}
return r
default:
panic(fmt.Sprintf("bad value for AdjustFunctionNames: %q", opts.AdjustFunctionNames))
}
}
func gatherFunctions(functions map[string]reflect.Value, t reflect.Type, v reflect.Value, opts HandlerOpts) error {
if t.Kind() != reflect.Struct {
return fmt.Errorf("sherpa sections must be a struct (not a ptr)")
}
for i := 0; i < t.NumMethod(); i++ {
name := adjustFunctionNameCapitals(t.Method(i).Name, opts)
m := v.Method(i)
if _, ok := functions[name]; ok {
return fmt.Errorf("duplicate function %s", name)
}
functions[name] = m
}
for i := 0; i < t.NumField(); i++ {
err := gatherFunctions(functions, t.Field(i).Type, v.Field(i), opts)
if err != nil {
return err
}
}
return nil
}
// NewHandler returns a new http.Handler that serves all Sherpa API-related requests.
//
// Path is the path this API is available at.
//
// Version should be a semantic version.
//
// API should be a struct. It represents the root section. All methods of a
// section are exported as sherpa functions. All fields must be other sections
// (structs) whose methods are also exported, recursively. Method names must
// start with an uppercase character to be exported, but their exported names
// start with a lowercase character by default (see HandlerOpts.AdjustFunctionNames).
//
// Doc is documentation for the top-level sherpa section, as generated by sherpadoc.
//
// Opts allows further configuration of the handler.
//
// Methods on the exported sections are exported as Sherpa functions.
// If the first parameter of a method is a context.Context, the context from the HTTP request is passed.
// This lets you abort work if the HTTP request underlying the function call disappears.
//
// Parameters and return values for exported functions are automatically converted from/to JSON.
// If the last element of a return value (if any) is an error,
// that error field is taken to indicate whether the call succeeded.
// Exported functions can also panic with an *Error or *InternalServerError to indicate a failed function call.
// Returning an error with a Code starting with "server" indicates an implementation error, which will be logged through the collector.
//
// Variadic functions can be called, but in the call (from the client), the variadic parameters must be passed in as an array.
//
// This handler strips "path" from the request.
func NewHandler(path string, version string, api interface{}, doc *sherpadoc.Section, opts *HandlerOpts) (http.Handler, error) {
var xopts HandlerOpts
if opts != nil {
xopts = *opts
}
if xopts.Collector == nil {
// We always want to have a collector, so we don't have to check for nil all the time when calling.
xopts.Collector = ignoreCollector{}
}
doc.Version = version
doc.SherpaVersion = SherpaVersion
functions := map[string]reflect.Value{
"_docs": reflect.ValueOf(func() *sherpadoc.Section {
return doc
}),
}
err := gatherFunctions(functions, reflect.TypeOf(api), reflect.ValueOf(api), xopts)
if err != nil {
return nil, err
}
names := make([]string, 0, len(functions))
for name := range functions {
names = append(names, name)
}
elems := strings.Split(strings.Trim(path, "/"), "/")
id := elems[len(elems)-1]
sherpaJSON := &JSON{
ID: id,
Title: doc.Name,
Functions: names,
BaseURL: "", // filled in during request
Version: version,
SherpaVersion: SherpaVersion,
SherpadocVersion: doc.SherpadocVersion,
}
h := http.StripPrefix(path, &handler{
path: path,
functions: functions,
sherpaJSON: sherpaJSON,
opts: xopts,
})
return h, nil
}
func badMethod(w http.ResponseWriter) {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
}
// return whether callback js snippet is valid.
// this is a coarse test. we disallow some valid js identifiers, like "\u03c0",
// and we allow many invalid ones, such as js keywords, "0intro" and identifiers starting/ending with ".", or having multiple dots.
func validCallback(cb string) bool {
if cb == "" {
return false
}
for _, c := range cb {
if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9' || c == '_' || c == '$' || c == '.' {
continue
}
return false
}
return true
}
// ServeHTTP serves an HTTP request for this Sherpa API.
// It expects the path it was mounted at to have been stripped from the request path, e.g. with http.StripPrefix.
//
// The following endpoints are handled:
// - sherpa.json, describing this API.
// - sherpa.js, a small stand-alone client JavaScript library that makes it trivial to start using this API from a browser.
// - functionName, for function invocations on this API.
//
// HTTP response will have CORS-headers set, and support the OPTIONS HTTP method.
func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
hdr := w.Header()
hdr.Set("Access-Control-Allow-Origin", "*")
hdr.Set("Access-Control-Allow-Methods", "GET, POST")
hdr.Set("Access-Control-Allow-Headers", "Content-Type")
collector := h.opts.Collector
switch {
case r.URL.Path == "":
baseURL := getBaseURL(r) + h.path
docURL := "https://www.sherpadoc.org/#" + baseURL
err := htmlTemplate.Execute(w, map[string]interface{}{
"id": h.sherpaJSON.ID,
"title": h.sherpaJSON.Title,
"version": h.sherpaJSON.Version,
"docURL": docURL,
"jsURL": baseURL + "sherpa.js",
})
if err != nil {
log.Println(err)
}
case r.URL.Path == "sherpa.json":
switch r.Method {
case "OPTIONS":
w.WriteHeader(204)
case "GET":
collector.JSON()
hdr.Set("Content-Type", "application/json; charset=utf-8")
hdr.Set("Cache-Control", "no-cache")
sherpaJSON := *h.sherpaJSON // Copy, the BaseURL below is per-request.
sherpaJSON.BaseURL = getBaseURL(r) + h.path
err := json.NewEncoder(w).Encode(&sherpaJSON)
if err != nil {
log.Println("writing sherpa.json response:", err)
}
default:
badMethod(w)
}
case r.URL.Path == "sherpa.js":
if r.Method != "GET" {
badMethod(w)
return
}
collector.JavaScript()
hdr.Set("Content-Type", "text/javascript; charset=utf-8")
hdr.Set("Cache-Control", "no-cache")
sherpaJSON := *h.sherpaJSON // Copy, the BaseURL below is per-request.
sherpaJSON.BaseURL = getBaseURL(r) + h.path
buf, err := json.Marshal(&sherpaJSON)
if err != nil {
log.Println("marshaling sherpa.json for sherpa.js:", err)
return
}
js := strings.Replace(sherpaJS, "{{.sherpaJSON}}", string(buf), -1)
_, err = w.Write([]byte(js))
if err != nil {
log.Println("writing sherpa.js response:", err)
}
default:
name := r.URL.Path
fn, ok := h.functions[name]
switch r.Method {
case "OPTIONS":
w.WriteHeader(204)
case "POST":
hdr.Set("Cache-Control", "no-store")
if !ok {
collector.BadFunction()
respondJSON(w, 404, &response{Error: &Error{Code: SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", name)}})
return
}
ct := r.Header.Get("Content-Type")
if ct == "" {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: "missing content-type"}})
return
}
mt, mtparams, err := mime.ParseMediaType(ct)
if err != nil {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf("invalid content-type %q", ct)}})
return
}
if mt != "application/json" {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unrecognized content-type %q, expecting "application/json"`, mt)}})
return
}
charset, ok := mtparams["charset"]
if ok && strings.ToLower(charset) != "utf-8" {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`unexpected charset %q, expecting "utf-8"`, charset)}})
return
}
t0 := time.Now()
r, xerr := h.call(r.Context(), name, fn, r.Body)
durationSec := time.Since(t0).Seconds()
if xerr != nil {
switch err := xerr.(type) {
case *InternalServerError:
collector.FunctionCall(name, durationSec, err.Code)
respondJSON(w, 500, &response{Error: err.error()})
case *Error:
collector.FunctionCall(name, durationSec, err.Code)
respondJSON(w, 200, &response{Error: err})
default:
collector.FunctionCall(name, durationSec, "server:panic")
panic(err)
}
} else {
var v interface{}
if raw, ok := r.(Raw); ok {
v = raw
} else {
v = &response{Result: r}
}
collector.FunctionCall(name, durationSec, "")
respondJSON(w, 200, v)
}
case "GET":
hdr.Set("Cache-Control", "no-store")
jsonp := false
if !ok {
collector.BadFunction()
respondJSON(w, 404, &response{Error: &Error{Code: SherpaBadFunction, Message: fmt.Sprintf("function %q does not exist", name)}})
return
}
err := r.ParseForm()
if err != nil {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: "could not parse query string"}})
return
}
callback := r.Form.Get("callback")
_, ok := r.Form["callback"]
if ok {
if !validCallback(callback) {
collector.ProtocolError()
respondJSON(w, 200, &response{Error: &Error{Code: SherpaBadRequest, Message: fmt.Sprintf(`invalid callback name %q`, callback)}})
return
}
jsonp = true
}
// We allow an empty list to be missing to make it cleaner & easier to call health check functions (no ugly urls).
body := r.Form.Get("body")
_, ok = r.Form["body"]
if !ok {
body = `{"params": []}`
}
t0 := time.Now()
r, xerr := h.call(r.Context(), name, fn, strings.NewReader(body))
durationSec := time.Since(t0).Seconds()
if xerr != nil {
switch err := xerr.(type) {
case *InternalServerError:
collector.FunctionCall(name, durationSec, err.Code)
respond(w, 500, &response{Error: err.error()}, jsonp, callback)
case *Error:
collector.FunctionCall(name, durationSec, err.Code)
respond(w, 200, &response{Error: err}, jsonp, callback)
default:
collector.FunctionCall(name, durationSec, "server:panic")
panic(err)
}
} else {
var v interface{}
if raw, ok := r.(Raw); ok {
v = raw
} else {
v = &response{Result: r}
}
collector.FunctionCall(name, durationSec, "")
respond(w, 200, v, jsonp, callback)
}
default:
badMethod(w)
}
}
}

87
vendor/github.com/mjl-/sherpa/intstr.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
package sherpa
import (
"encoding/json"
"fmt"
"strconv"
)
// Int64s is an int64 that can be read as either a JSON string or JSON number, to
// be used in sherpa function parameters for compatibility with JavaScript.
// For struct fields, use the "json:,string" struct tag instead.
type Int64s int64
// Int returns the int64 value.
func (i Int64s) Int() int64 {
return int64(i)
}
// MarshalJSON returns a JSON-string-encoding of the int64.
func (i *Int64s) MarshalJSON() ([]byte, error) {
var v int64
if i != nil {
v = int64(*i)
}
return json.Marshal(fmt.Sprintf("%d", v))
}
// UnmarshalJSON parses JSON into the int64. Both a string encoding and a number
// encoding are allowed. JavaScript clients must use the string encoding because
// the number encoding loses precision at 1<<53.
func (i *Int64s) UnmarshalJSON(buf []byte) error {
var s string
if len(buf) > 0 && buf[0] == '"' {
err := json.Unmarshal(buf, &s)
if err != nil {
return err
}
} else {
s = string(buf)
}
vv, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return err
}
*i = Int64s(vv)
return nil
}
// Uint64s is an uint64 that can be read as either a JSON string or JSON number, to
// be used in sherpa function parameters for compatibility with JavaScript.
// For struct fields, use the "json:,string" struct tag instead.
type Uint64s uint64
// Int returns the uint64 value.
func (i Uint64s) Int() uint64 {
return uint64(i)
}
// MarshalJSON returns a JSON-string-encoding of the uint64.
func (i *Uint64s) MarshalJSON() ([]byte, error) {
var v uint64
if i != nil {
v = uint64(*i)
}
return json.Marshal(fmt.Sprintf("%d", v))
}
// UnmarshalJSON parses JSON into the uint64. Both a string encoding and a number
// encoding are allowed. JavaScript clients must use the string encoding because
// the number encoding loses precision at 1<<53.
func (i *Uint64s) UnmarshalJSON(buf []byte) error {
var s string
if len(buf) > 0 && buf[0] == '"' {
err := json.Unmarshal(buf, &s)
if err != nil {
return err
}
} else {
s = string(buf)
}
vv, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return err
}
*i = Uint64s(vv)
return nil
}
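A small sketch of how Int64s behaves when decoding, assuming this package is imported as sherpa; both encodings below are accepted:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mjl-/sherpa"
)

func main() {
	var v sherpa.Int64s
	// String form: required for JavaScript clients, keeps precision beyond 1<<53.
	_ = json.Unmarshal([]byte(`"9007199254740993"`), &v)
	fmt.Println(v.Int()) // 9007199254740993
	// Plain number form is accepted too.
	_ = json.Unmarshal([]byte(`123`), &v)
	fmt.Println(v.Int()) // 123
}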

13
vendor/github.com/mjl-/sherpa/isclosed.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
//go:build !plan9
// +build !plan9
package sherpa
import (
"errors"
"syscall"
)
func isConnectionClosed(err error) bool {
return errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET)
}

6
vendor/github.com/mjl-/sherpa/isclosed_plan9.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
package sherpa
func isConnectionClosed(err error) bool {
// todo: needs a better test
return false
}

136
vendor/github.com/mjl-/sherpa/sherpajs.go generated vendored Normal file
View File

@ -0,0 +1,136 @@
package sherpa
var sherpaJS = `
'use strict';
(function(undefined) {
var sherpa = {};
// prepare basic support for promises.
// we return functions with a "then" method only. our "then" isn't chainable. and you don't get other promise-related methods.
// but this "then" is enough so your browser's promise library (or a polyfill) can turn it into a real promise.
function thenable(fn) {
var settled = false;
var fulfilled = false;
var result = null;
var goods = [];
var bads = [];
// promise lib will call the returned function, make it the same as our .then function
var nfn = function(goodfn, badfn) {
if(settled) {
if(fulfilled && goodfn) {
goodfn(result);
}
if(!fulfilled && badfn) {
badfn(result);
}
} else {
if(goodfn) {
goods.push(goodfn);
}
if(badfn) {
bads.push(badfn);
}
}
};
nfn.then = nfn;
function done() {
while(fulfilled && goods.length > 0) {
goods.shift()(result);
}
while(!fulfilled && bads.length > 0) {
bads.shift()(result);
}
}
function makeSettle(xfulfilled) {
return function(arg) {
if(settled) {
return;
}
settled = true;
fulfilled = xfulfilled;
result = arg;
done();
};
}
var resolve = makeSettle(true);
var reject = makeSettle(false);
try {
fn(resolve, reject);
} catch(e) {
reject(e);
}
return nfn;
}
function postJSON(url, param, success, error) {
var req = new window.XMLHttpRequest();
req.open('POST', url, true);
req.onload = function onload() {
if(req.status >= 200 && req.status < 400) {
success(JSON.parse(req.responseText));
} else {
if(req.status === 404) {
error({code: 'sherpaBadFunction', message: 'function does not exist'});
} else {
error({code: 'sherpaHttpError', message: 'error calling function, HTTP status: '+req.status});
}
}
};
req.onerror = function onerror() {
error({code: 'sherpaClientError', message: 'connection failed'});
};
req.setRequestHeader('Content-Type', 'application/json');
req.send(JSON.stringify(param));
}
function makeFunction(api, name) {
return function() {
var params = Array.prototype.slice.call(arguments, 0);
return api._wrapThenable(thenable(function(resolve, reject) {
postJSON(api._sherpa.baseurl+name, {params: params}, function(response) {
if(response && response.error) {
reject(response.error);
} else if(response && response.hasOwnProperty('result')) {
resolve(response.result);
} else {
reject({code: 'sherpaBadResponse', message: "invalid sherpa response object, missing 'result'"});
}
}, reject);
}));
};
}
sherpa.init = function init(_sherpa) {
var api = {};
function _wrapThenable(thenable) {
return thenable;
}
function _call(name) {
return makeFunction(api, name).apply(undefined, Array.prototype.slice.call(arguments, 1));
}
api._sherpa = _sherpa;
api._wrapThenable = _wrapThenable;
api._call = _call;
for(var i = 0; i < _sherpa.functions.length; i++) {
var fn = _sherpa.functions[i];
api[fn] = makeFunction(api, fn);
}
return api;
};
var _sherpa = {{.sherpaJSON}};
window[_sherpa.id] = sherpa.init(_sherpa);
})();
`

7
vendor/github.com/mjl-/sherpadoc/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2016-2019 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

28
vendor/github.com/mjl-/sherpadoc/README.txt generated vendored Normal file
View File

@ -0,0 +1,28 @@
sherpadoc - documentation for sherpa APIs
Go package containing the type definitions for sherpa documentation, for encoding to and decoding from JSON.
Also contains the sherpadoc command, which reads Go code and writes sherpadoc JSON.
Use together with the sherpa library, github.com/mjl-/sherpa.
Read more about sherpa at https://www.ueber.net/who/mjl/sherpa/
# About
Written by Mechiel Lukkien, mechiel@ueber.net.
Bug fixes, patches, comments are welcome.
MIT-licensed, see LICENSE.
# todo
- major cleanup required. too much parsing is done that can probably be handled by the go/* packages.
- check that all cases of embedding work
- check that all cross-package referencing (ast.SelectorExpr) works
- better cli syntax for replacements, and always replace based on fully qualified names. currently you need to specify both the fully qualified and unqualified type paths.
- see if order of items in output depends on a map somewhere; I've seen diffs for generated JSON files where a type was only moved, not modified.
- better error messages and error handling, stricter parsing
- support type aliases
- support plain iota enums? currently only simple literals are supported for enums.
- support complete expressions for enum consts?
- find out which go constructs people want to use that aren't yet implemented by sherpadoc
- when to make a field nullable. when omitempty is set? (currently yes), when field is a pointer type (currently yes). should we have a way to prevent nullable without omitempty set, or make field a pointer without it being nullable?
- write tests

166
vendor/github.com/mjl-/sherpadoc/check.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
package sherpadoc
import (
"fmt"
)
type genError struct{ error }
func parseError(path string, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
err := fmt.Errorf("invalid sherpadoc at %s: %s", path, msg)
panic(genError{err})
}
func makePath(path string, field string, index int, name string) string {
return fmt.Sprintf("%s.%s[%d (%q)]", path, field, index, name)
}
// NOTE: sherpaweb/ts/parse.ts and sherpadoc/check.go contain the same checking.
// The code is very similar. Best keep it in sync and modify the implementations in tandem.
type checker struct {
types map[string]struct{}
functions map[string]struct{}
}
func (c checker) markIdent(path, ident string) {
if _, ok := c.types[ident]; ok {
parseError(path, "duplicate type %q", ident)
}
c.types[ident] = struct{}{}
}
func (c checker) walkTypeNames(path string, sec *Section) {
for i, t := range sec.Structs {
c.markIdent(makePath(path, "Structs", i, t.Name), t.Name)
}
for i, t := range sec.Ints {
npath := makePath(path, "Ints", i, t.Name)
c.markIdent(npath, t.Name)
for j, v := range t.Values {
c.markIdent(makePath(npath, "Values", j, v.Name), v.Name)
}
}
for i, t := range sec.Strings {
npath := makePath(path, "Strings", i, t.Name)
c.markIdent(npath, t.Name)
for j, v := range t.Values {
c.markIdent(makePath(npath, "Values", j, v.Name), v.Name)
}
}
for i, subsec := range sec.Sections {
c.walkTypeNames(makePath(path, "Sections", i, subsec.Name), subsec)
}
}
func (c checker) walkFunctionNames(path string, sec *Section) {
for i, fn := range sec.Functions {
npath := makePath(path, "Functions", i, fn.Name)
if _, ok := c.functions[fn.Name]; ok {
parseError(npath, "duplicate function %q", fn.Name)
}
c.functions[fn.Name] = struct{}{}
paramNames := map[string]struct{}{}
for i, arg := range fn.Params {
if _, ok := paramNames[arg.Name]; ok {
parseError(makePath(npath, "Params", i, arg.Name), "duplicate parameter name")
}
paramNames[arg.Name] = struct{}{}
}
returnNames := map[string]struct{}{}
for i, arg := range fn.Returns {
if _, ok := returnNames[arg.Name]; ok {
parseError(makePath(npath, "Returns", i, arg.Name), "duplicate return name")
}
returnNames[arg.Name] = struct{}{}
}
}
for i, subsec := range sec.Sections {
c.walkFunctionNames(makePath(path, "Sections", i, subsec.Name), subsec)
}
}
func (c checker) checkTypewords(path string, tokens []string, okNullable bool) {
if len(tokens) == 0 {
parseError(path, "unexpected end of typewords")
}
t := tokens[0]
tokens = tokens[1:]
switch t {
case "nullable":
if !okNullable {
parseError(path, "repeated nullable in typewords")
}
if len(tokens) == 0 {
parseError(path, "missing typeword after %#v", t)
}
c.checkTypewords(path, tokens, false)
case "any", "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "int64s", "uint64s", "float32", "float64", "string", "timestamp":
if len(tokens) != 0 {
parseError(path, "leftover typewords %v", tokens)
}
case "[]", "{}":
if len(tokens) == 0 {
parseError(path, "missing typeword after %#v", t)
}
c.checkTypewords(path, tokens, true)
default:
_, ok := c.types[t]
if !ok {
parseError(path, "referenced type %q does not exist", t)
}
if len(tokens) != 0 {
parseError(path, "leftover typewords %v", tokens)
}
}
}
func (c checker) walkTypewords(path string, sec *Section) {
for i, t := range sec.Structs {
npath := makePath(path, "Structs", i, t.Name)
for j, f := range t.Fields {
c.checkTypewords(makePath(npath, "Fields", j, f.Name), f.Typewords, true)
}
}
for i, fn := range sec.Functions {
npath := makePath(path, "Functions", i, fn.Name)
for j, arg := range fn.Params {
c.checkTypewords(makePath(npath, "Params", j, arg.Name), arg.Typewords, true)
}
for j, arg := range fn.Returns {
c.checkTypewords(makePath(npath, "Returns", j, arg.Name), arg.Typewords, true)
}
}
for i, subsec := range sec.Sections {
c.walkTypewords(makePath(path, "Sections", i, subsec.Name), subsec)
}
}
// Check walks the sherpa section and checks it for correctness. It checks for:
//
// - Duplicate type names.
// - Duplicate parameter or return names.
// - References to types that are not defined.
// - Validity of typewords.
func Check(doc *Section) (retErr error) {
defer func() {
e := recover()
if e != nil {
g, ok := e.(genError)
if !ok {
panic(e)
}
retErr = error(g)
}
}()
c := checker{map[string]struct{}{}, map[string]struct{}{}}
c.walkTypeNames("", doc)
c.walkFunctionNames("", doc)
c.walkTypewords("", doc)
return nil
}
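A short sketch of running Check on a hand-built Section; the section contents below are made up purely to trigger the duplicate-type error:
package main

import (
	"fmt"

	"github.com/mjl-/sherpadoc"
)

func main() {
	doc := &sherpadoc.Section{
		Name: "Example",
		Structs: []sherpadoc.Struct{
			{Name: "Dup", Fields: []sherpadoc.Field{}},
			{Name: "Dup", Fields: []sherpadoc.Field{}}, // same type name twice
		},
	}
	err := sherpadoc.Check(doc)
	fmt.Println(err) // invalid sherpadoc at .Structs[1 ("Dup")]: duplicate type "Dup"
}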

270
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/main.go generated vendored Normal file
View File

@ -0,0 +1,270 @@
/*
Sherpadoc parses Go code and outputs sherpa documentation in JSON.
This documentation is provided to the sherpa HTTP handler to serve
as documentation through the _docs function.
Example:
sherpadoc Awesome >awesome.json
Sherpadoc parses Go code, finds a struct named "Awesome", and gathers
documentation:
Comments above the struct are used as section documentation. Fields
in section structs are treated as subsections, and can in turn
contain subsections. These subsections and their methods are also
exported and documented in the sherpa API. Add a struct tag "sherpa"
to override the name of the subsection, for example `sherpa:"Another
Awesome API"`.
Comments above method names are function documentation. A synopsis
is automatically generated.
Types used as parameters or return values are added to the section
documentation where they are used. The comments above the type are
used, as well as the comments for each field in a struct. The
documented field names know about the "json" struct field tags.
More elaborate example:
sherpadoc
-title 'Awesome API by mjl' \
-replace 'pkg.Type string,example.com/some/pkg.SomeType [] string' \
path/to/awesome/code Awesome \
>awesome.json
Most common Go code patterns for API functions have been implemented
in sherpadoc, but you may run into missing support.
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"github.com/mjl-/sherpadoc"
"golang.org/x/mod/modfile"
)
var (
packagePath = flag.String("package-path", ".", "of source code to parse")
replace = flag.String("replace", "", "comma-separated list of type replacements, e.g. \"somepkg.SomeType string\"")
title = flag.String("title", "", "title of the API, default is the name of the type of the main API")
adjustFunctionNames = flag.String("adjust-function-names", "", `by default, the first character of function names is turned into lower case; with "lowerWord" the first string of upper case characters is lower cased, with "none" the name is left as is`)
)
// If there is a "vendor" directory, we'll load packages from there (instead of
// through (slower) packages.Load), and we need to know the module name to resolve
// imports to paths in vendor.
var (
gomodFile *modfile.File
gomodDir string
)
type field struct {
Name string
Typewords []string
Doc string
Fields []*field
}
func (f field) TypeString() string {
t := []string{}
for _, e := range f.Typewords {
if e == "nullable" {
e = "*"
}
t = append(t, e)
}
return strings.Join(t, "")
}
type typeKind int
const (
typeStruct typeKind = iota
typeInts
typeStrings
typeBytes
)
// NamedType represents the type of a parameter or return value.
type namedType struct {
Name string
Text string
Kind typeKind
Fields []*field // For kind is typeStruct.
// For kind is typeInts
IntValues []struct {
Name string
Value int
Docs string
}
// For kind is typeStrings
StringValues []struct {
Name string
Value string
Docs string
}
}
type function struct {
Name string
Text string
Params []sherpadoc.Arg
Returns []sherpadoc.Arg
}
// Section is an API section with docs, functions and subsections.
// Types are gathered per section, and moved up the section tree to the first common ancestor, so types are only documented once.
type section struct {
TypeName string // Name of the type for this section.
Name string // Name of the section. Either same as TypeName, or overridden with a "sherpa" struct tag.
Text string
Types []*namedType
Typeset map[string]struct{}
Functions []*function
Sections []*section
}
func check(err error, action string) {
if err != nil {
log.Fatalf("%s: %s", action, err)
}
}
func usage() {
log.Println("usage: sherpadoc [flags] section")
flag.PrintDefaults()
os.Exit(2)
}
func main() {
log.SetFlags(0)
flag.Usage = usage
flag.Parse()
args := flag.Args()
if len(args) != 1 {
usage()
}
// If vendor exists, we load packages from it.
for dir, _ := os.Getwd(); dir != "" && dir != "/"; dir = filepath.Dir(dir) {
p := filepath.Join(dir, "go.mod")
if _, err := os.Stat(p); err != nil && os.IsNotExist(err) {
continue
} else if err != nil {
log.Printf("searching for go.mod: %v", err)
break
}
if _, err := os.Stat(filepath.Join(dir, "vendor")); err != nil {
break
}
if gomod, err := os.ReadFile(p); err != nil {
log.Fatalf("reading go.mod: %s", err)
} else if mf, err := modfile.ParseLax("go.mod", gomod, nil); err != nil {
log.Fatalf("parsing go.mod: %s", err)
} else {
gomodFile = mf
gomodDir = dir
}
}
section := parseDoc(args[0], *packagePath)
if *title != "" {
section.Name = *title
}
moveTypesUp(section)
doc := sherpaSection(section)
doc.SherpaVersion = 0
doc.SherpadocVersion = sherpadoc.SherpadocVersion
err := sherpadoc.Check(doc)
check(err, "checking sherpadoc output before writing")
writeJSON(doc)
}
func writeJSON(v interface{}) {
buf, err := json.MarshalIndent(v, "", "\t")
check(err, "marshal to json")
_, err = os.Stdout.Write(buf)
check(err, "writing json to stdout")
_, err = fmt.Println()
check(err, "write to stdout")
}
type typeCount struct {
t *namedType
count int
}
// Move types used in multiple sections up to their common ancestor.
func moveTypesUp(sec *section) {
// First, process each child.
for _, s := range sec.Sections {
moveTypesUp(s)
}
// Count how often a type is used from here downwards.
// If more than once, move the type up to here.
counts := map[string]*typeCount{}
countTypes(counts, sec)
for _, tc := range counts {
if tc.count <= 1 {
continue
}
for _, sub := range sec.Sections {
removeType(sub, tc.t)
}
if !hasType(sec, tc.t) {
sec.Types = append(sec.Types, tc.t)
}
}
}
func countTypes(counts map[string]*typeCount, sec *section) {
for _, t := range sec.Types {
_, ok := counts[t.Name]
if !ok {
counts[t.Name] = &typeCount{t, 0}
}
counts[t.Name].count++
}
for _, subsec := range sec.Sections {
countTypes(counts, subsec)
}
}
func removeType(sec *section, t *namedType) {
types := make([]*namedType, 0, len(sec.Types))
for _, tt := range sec.Types {
if tt.Name != t.Name {
types = append(types, tt)
}
}
sec.Types = types
for _, sub := range sec.Sections {
removeType(sub, t)
}
}
func hasType(sec *section, t *namedType) bool {
for _, tt := range sec.Types {
if tt.Name == t.Name {
return true
}
}
return false
}

857
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/parse.go generated vendored Normal file
View File

@ -0,0 +1,857 @@
package main
import (
"fmt"
"go/ast"
"go/doc"
"go/parser"
"go/token"
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"unicode"
"golang.org/x/tools/go/packages"
"github.com/mjl-/sherpadoc"
)
// ParsedPackage possibly includes some of its imports, because the package that contains the section references them.
type parsedPackage struct {
Fset *token.FileSet // Used with a token.Pos to get offending locations.
Path string // Of import, used for keeping duplicate type names from different packages unique.
Pkg *ast.Package // Needed for its files: we need a file to find the package path and identifier used to reference other types.
Docpkg *doc.Package
Imports map[string]*parsedPackage // Package/import path to parsed packages.
}
type typewords []string
func (pp *parsedPackage) lookupType(name string) *doc.Type {
for _, t := range pp.Docpkg.Types {
if t.Name == name {
return t
}
}
return nil
}
// Like log.Fatalf, but prefixes error message with offending file position (if known).
// pp is the package where the position tok belongs to.
func logFatalLinef(pp *parsedPackage, tok token.Pos, format string, args ...interface{}) {
if !tok.IsValid() {
log.Fatalf(format, args...)
}
msg := fmt.Sprintf(format, args...)
log.Fatalf("%s: %s", pp.Fset.Position(tok).String(), msg)
}
// Documentation for a single field, with text above the field, and
// on the right of the field combined.
func fieldDoc(f *ast.Field) string {
s := ""
if f.Doc != nil {
s += strings.Replace(strings.TrimSpace(f.Doc.Text()), "\n", " ", -1)
}
if f.Comment != nil {
if s != "" {
s += "; "
}
s += strings.TrimSpace(f.Comment.Text())
}
return s
}
// Parse string literal. Errors are fatal.
func parseStringLiteral(s string) string {
r, err := strconv.Unquote(s)
check(err, "parsing string literal")
return r
}
func jsonName(tag string, name string) string {
s := reflect.StructTag(tag).Get("json")
if s == "" || strings.HasPrefix(s, ",") {
return name
} else if s == "-" {
return ""
} else {
return strings.Split(s, ",")[0]
}
}
// Return the names (can be none) for a field. Takes exportedness
// and JSON tag annotation into account.
func nameList(names []*ast.Ident, tag *ast.BasicLit) []string {
if names == nil {
return nil
}
l := []string{}
for _, name := range names {
if ast.IsExported(name.Name) {
l = append(l, name.Name)
}
}
if len(l) == 1 && tag != nil {
name := jsonName(parseStringLiteral(tag.Value), l[0])
if name != "" {
return []string{name}
}
return nil
}
return l
}
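A hypothetical struct illustrating how jsonName and nameList pick the documented field names:
package example

// Hypothetical struct; the comments state which names survive.
type Example struct {
	Plain      string                // kept as "Plain"
	Renamed    string `json:"other"` // kept, documented as "other"
	Hidden     string `json:"-"`     // dropped from the documentation
	unexported string                // dropped: not exported
}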
// Parses a top-level sherpadoc section.
func parseDoc(apiName, packagePath string) *section {
fset := token.NewFileSet()
pkgs, firstErr := parser.ParseDir(fset, packagePath, nil, parser.ParseComments)
check(firstErr, "parsing code")
for _, pkg := range pkgs {
docpkg := doc.New(pkg, "", doc.AllDecls)
for _, t := range docpkg.Types {
if t.Name == apiName {
par := &parsedPackage{
Fset: fset,
Path: packagePath,
Pkg: pkg,
Docpkg: docpkg,
Imports: make(map[string]*parsedPackage),
}
return parseSection(t, par)
}
}
}
log.Fatalf("type %q not found", apiName)
return nil
}
// Parse a section and its optional subsections, recursively.
// t is the type of the struct with the sherpa methods to be parsed.
func parseSection(t *doc.Type, pp *parsedPackage) *section {
sec := &section{
t.Name,
t.Name,
strings.TrimSpace(t.Doc),
nil,
map[string]struct{}{},
nil,
nil,
}
// make list of methods to parse, sorted by position in the source file.
methods := make([]*doc.Func, len(t.Methods))
copy(methods, t.Methods)
sort.Slice(methods, func(i, j int) bool {
return methods[i].Decl.Name.NamePos < methods[j].Decl.Name.NamePos
})
for _, fn := range methods {
parseMethod(sec, fn, pp)
}
// parse subsections
ts := t.Decl.Specs[0].(*ast.TypeSpec)
expr := ts.Type
st := expr.(*ast.StructType)
for _, f := range st.Fields.List {
ident, ok := f.Type.(*ast.Ident)
if !ok {
continue
}
name := ident.Name
if f.Tag != nil {
name = reflect.StructTag(parseStringLiteral(f.Tag.Value)).Get("sherpa")
}
subt := pp.lookupType(ident.Name)
if subt == nil {
logFatalLinef(pp, ident.Pos(), "subsection %q not found", ident.Name)
}
subsec := parseSection(subt, pp)
subsec.Name = name
sec.Sections = append(sec.Sections, subsec)
}
return sec
}
// Ensure type "t" (used in a field or argument) defined in package pp is parsed
// and added to the section.
func ensureNamedType(t *doc.Type, sec *section, pp *parsedPackage) {
typePath := pp.Path + "." + t.Name
if _, have := sec.Typeset[typePath]; have {
return
}
tt := &namedType{
Name: t.Name,
Text: strings.TrimSpace(t.Doc),
}
// add it early, so self-referencing types can't cause a loop
sec.Types = append(sec.Types, tt)
sec.Typeset[typePath] = struct{}{}
ts := t.Decl.Specs[0].(*ast.TypeSpec)
if ts.Assign.IsValid() {
logFatalLinef(pp, t.Decl.TokPos, "type aliases not yet supported")
}
var gatherFields func(e ast.Expr, typeName string, xpp *parsedPackage)
var gatherStructFields func(nt *ast.StructType, typeName string, xpp *parsedPackage)
gatherFields = func(e ast.Expr, typeName string, xpp *parsedPackage) {
switch xt := e.(type) {
case *ast.Ident:
// Bare type name.
tt := xpp.lookupType(xt.Name)
if tt == nil {
log.Fatalf("could not find type %q used in type %q in package %q", xt.Name, typeName, xpp.Path)
}
tts := tt.Decl.Specs[0].(*ast.TypeSpec)
if tts.Assign.IsValid() {
logFatalLinef(xpp, tt.Decl.TokPos, "type aliases not yet supported")
}
tst, ok := tts.Type.(*ast.StructType)
if !ok {
logFatalLinef(xpp, tt.Decl.TokPos, "unexpected field type %T", tts.Type)
}
gatherStructFields(tst, tt.Name, xpp)
case *ast.StarExpr:
// Field with "*", handle as if without *.
gatherFields(xt.X, typeName, xpp)
case *ast.SelectorExpr:
// With package prefix, lookup the type in the package and gather its fields.
dt, nxpp := parseFieldSelector(useSrc{xpp, typeName}, xt)
tts := dt.Decl.Specs[0].(*ast.TypeSpec)
if tts.Assign.IsValid() {
logFatalLinef(nxpp, dt.Decl.TokPos, "type aliases not yet supported")
}
tst, ok := tts.Type.(*ast.StructType)
if !ok {
logFatalLinef(nxpp, dt.Decl.TokPos, "unexpected field type %T", tts.Type)
}
gatherStructFields(tst, dt.Name, nxpp)
default:
logFatalLinef(xpp, t.Decl.TokPos, "unsupported field with type %T", e)
}
}
gatherStructFields = func(nt *ast.StructType, typeName string, xpp *parsedPackage) {
for _, f := range nt.Fields.List {
if len(f.Names) == 0 {
// Embedded field. Treat its fields as if they were included.
gatherFields(f.Type, typeName, xpp)
continue
}
// Check if we need this type. Otherwise we may trip
// over an unhandled type that we wouldn't include in
// the output (eg due to a struct tag).
names := nameList(f.Names, f.Tag)
need := false
for _, name := range names {
if name != "" {
need = true
break
}
}
if !need {
continue
}
ff := &field{
"",
nil,
fieldDoc(f),
[]*field{},
}
ff.Typewords = gatherFieldType(t.Name, ff, f.Type, f.Tag, sec, xpp)
for _, name := range nameList(f.Names, f.Tag) {
nf := &field{}
*nf = *ff
nf.Name = name
tt.Fields = append(tt.Fields, nf)
}
}
}
switch nt := ts.Type.(type) {
case *ast.StructType:
tt.Kind = typeStruct
gatherStructFields(nt, t.Name, pp)
case *ast.ArrayType:
if ident, ok := nt.Elt.(*ast.Ident); ok && ident.Name == "byte" {
tt.Kind = typeBytes
} else {
logFatalLinef(pp, t.Decl.TokPos, "named type with unsupported element type %T", ts.Type)
}
case *ast.Ident:
if strings.HasSuffix(typePath, "sherpa.Int64s") || strings.HasSuffix(typePath, "sherpa.Uint64s") {
return
}
tt.Text = t.Doc + ts.Comment.Text()
switch nt.Name {
case "byte", "int16", "uint16", "int32", "uint32", "int", "uint":
tt.Kind = typeInts
case "string":
tt.Kind = typeStrings
default:
logFatalLinef(pp, t.Decl.TokPos, "unrecognized type identifier %#v", nt.Name)
}
for _, c := range t.Consts {
for _, spec := range c.Decl.Specs {
vs, ok := spec.(*ast.ValueSpec)
if !ok {
logFatalLinef(pp, spec.Pos(), "unsupported non-ast.ValueSpec constant %#v", spec)
}
if len(vs.Names) != 1 {
logFatalLinef(pp, vs.Pos(), "unsupported multiple .Names in %#v", vs)
}
name := vs.Names[0].Name
if len(vs.Values) != 1 {
logFatalLinef(pp, vs.Pos(), "unsupported multiple .Values in %#v", vs)
}
lit, ok := vs.Values[0].(*ast.BasicLit)
if !ok {
logFatalLinef(pp, vs.Pos(), "unsupported non-ast.BasicLit first .Values %#v", vs)
}
comment := vs.Doc.Text() + vs.Comment.Text()
switch lit.Kind {
case token.INT:
if tt.Kind != typeInts {
logFatalLinef(pp, lit.Pos(), "int value for for non-int-enum %q", t.Name)
}
v, err := strconv.ParseInt(lit.Value, 10, 64)
check(err, "parse int literal")
iv := struct {
Name string
Value int
Docs string
}{name, int(v), strings.TrimSpace(comment)}
tt.IntValues = append(tt.IntValues, iv)
case token.STRING:
if tt.Kind != typeStrings {
logFatalLinef(pp, lit.Pos(), "string for non-string-enum %q", t.Name)
}
v, err := strconv.Unquote(lit.Value)
check(err, "unquote literal")
sv := struct {
Name string
Value string
Docs string
}{name, v, strings.TrimSpace(comment)}
tt.StringValues = append(tt.StringValues, sv)
default:
logFatalLinef(pp, lit.Pos(), "unexpected literal kind %#v", lit.Kind)
}
}
}
default:
logFatalLinef(pp, t.Decl.TokPos, "unsupported field/param/return type %T", ts.Type)
}
}
func hasOmitEmpty(tag *ast.BasicLit) bool {
return hasJSONTagValue(tag, "omitempty")
}
// isCommaString returns whether the tag (may be nil) contains a "json:,string" directive.
func isCommaString(tag *ast.BasicLit) bool {
return hasJSONTagValue(tag, "string")
}
func hasJSONTagValue(tag *ast.BasicLit, v string) bool {
if tag == nil {
return false
}
st := reflect.StructTag(parseStringLiteral(tag.Value))
s, ok := st.Lookup("json")
if !ok || s == "-" {
return false
}
t := strings.Split(s, ",")
for _, e := range t[1:] {
if e == v {
return true
}
}
return false
}
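Hypothetical fields illustrating these tag checks:
package example

// Hypothetical struct; comments show the checks' outcomes.
type T struct {
	A int64  `json:"a,string"`    // isCommaString: true -> documented as "int64s"
	B *int   `json:"b,omitempty"` // hasOmitEmpty: true -> typewords get a "nullable" prefix
	C string `json:"-"`           // "-": field is dropped, tag options are not consulted
}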
func gatherFieldType(typeName string, f *field, e ast.Expr, fieldTag *ast.BasicLit, sec *section, pp *parsedPackage) typewords {
nullablePrefix := typewords{}
if hasOmitEmpty(fieldTag) {
nullablePrefix = typewords{"nullable"}
}
name := checkReplacedType(useSrc{pp, typeName}, e)
if name != nil {
if name[0] != "nullable" {
return append(nullablePrefix, name...)
}
return name
}
switch t := e.(type) {
case *ast.Ident:
tt := pp.lookupType(t.Name)
if tt != nil {
ensureNamedType(tt, sec, pp)
return []string{t.Name}
}
commaString := isCommaString(fieldTag)
name := t.Name
switch name {
case "byte":
name = "uint8"
case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float32", "float64", "string", "any":
case "int64", "uint64":
if commaString {
name += "s"
}
case "int", "uint":
name += "32"
default:
logFatalLinef(pp, t.Pos(), "unsupported field type %q used in type %q in package %q", name, typeName, pp.Path)
}
if commaString && name != "int64s" && name != "uint64s" {
logFatalLinef(pp, t.Pos(), "unsupported tag `json:,\"string\"` for non-64bit int in %s.%s", typeName, f.Name)
}
return append(nullablePrefix, name)
case *ast.ArrayType:
return append(nullablePrefix, append([]string{"[]"}, gatherFieldType(typeName, f, t.Elt, nil, sec, pp)...)...)
case *ast.MapType:
_ = gatherFieldType(typeName, f, t.Key, nil, sec, pp)
vt := gatherFieldType(typeName, f, t.Value, nil, sec, pp)
return append(nullablePrefix, append([]string{"{}"}, vt...)...)
case *ast.InterfaceType:
// If we export an interface as an "any" type, we want to make sure it's intended.
// Require the user to be explicit with an empty interface.
if t.Methods != nil && len(t.Methods.List) > 0 {
logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t)
}
return append(nullablePrefix, "any")
case *ast.StarExpr:
tw := gatherFieldType(typeName, f, t.X, fieldTag, sec, pp)
if tw[0] != "nullable" {
tw = append([]string{"nullable"}, tw...)
}
return tw
case *ast.SelectorExpr:
return append(nullablePrefix, parseSelector(t, typeName, sec, pp))
}
logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v for struct %q field %q in gatherFieldType", e, typeName, f.Name)
return nil
}
func parseArgType(e ast.Expr, sec *section, pp *parsedPackage) typewords {
name := checkReplacedType(useSrc{pp, sec.Name}, e)
if name != nil {
return name
}
switch t := e.(type) {
case *ast.Ident:
tt := pp.lookupType(t.Name)
if tt != nil {
ensureNamedType(tt, sec, pp)
return []string{t.Name}
}
name := t.Name
switch name {
case "byte":
name = "uint8"
case "bool", "int8", "uint8", "int16", "uint16", "int32", "uint32", "int64", "uint64", "float32", "float64", "string", "any":
case "int", "uint":
name += "32"
case "error":
// allowed here, checked if in right location by caller
default:
logFatalLinef(pp, t.Pos(), "unsupported arg type %q", name)
}
return []string{name}
case *ast.ArrayType:
return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...)
case *ast.Ellipsis:
// Ellipsis parameters to a function must be passed as an array, so document it that way.
return append([]string{"[]"}, parseArgType(t.Elt, sec, pp)...)
case *ast.MapType:
_ = parseArgType(t.Key, sec, pp)
vt := parseArgType(t.Value, sec, pp)
return append([]string{"{}"}, vt...)
case *ast.InterfaceType:
// If we export an interface as an "any" type, we want to make sure it's intended.
// Require the user to be explicit with an empty interface.
if t.Methods != nil && len(t.Methods.List) > 0 {
logFatalLinef(pp, t.Pos(), "unsupported non-empty interface param/return type %T", t)
}
return []string{"any"}
case *ast.StarExpr:
return append([]string{"nullable"}, parseArgType(t.X, sec, pp)...)
case *ast.SelectorExpr:
return []string{parseSelector(t, sec.TypeName, sec, pp)}
}
logFatalLinef(pp, e.Pos(), "unimplemented ast.Expr %#v in parseArgType", e)
return nil
}
// Parse the selector of a field, returning the type and the parsed package it exists in. This cannot be a builtin type.
func parseFieldSelector(u useSrc, t *ast.SelectorExpr) (*doc.Type, *parsedPackage) {
packageIdent, ok := t.X.(*ast.Ident)
if !ok {
u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X")
}
pkgName := packageIdent.Name
typeName := t.Sel.Name
importPath := u.lookupPackageImportPath(pkgName)
if importPath == "" {
u.Fatalf(t.Pos(), "cannot find source for type %q that references package %q (perhaps try -replace)", u, pkgName)
}
opp := u.Ppkg.ensurePackageParsed(importPath)
tt := opp.lookupType(typeName)
if tt == nil {
u.Fatalf(t.Pos(), "could not find type %q in package %q", typeName, importPath)
}
return tt, opp
}
func parseSelector(t *ast.SelectorExpr, srcTypeName string, sec *section, pp *parsedPackage) string {
packageIdent, ok := t.X.(*ast.Ident)
if !ok {
logFatalLinef(pp, t.Pos(), "unexpected non-ident for SelectorExpr.X")
}
pkgName := packageIdent.Name
typeName := t.Sel.Name
if pkgName == "time" && typeName == "Time" {
return "timestamp"
}
if pkgName == "sherpa" {
switch typeName {
case "Int64s":
return "int64s"
case "Uint64s":
return "uint64s"
}
}
importPath := pp.lookupPackageImportPath(srcTypeName, pkgName)
if importPath == "" {
logFatalLinef(pp, t.Pos(), "cannot find source for %q (perhaps try -replace)", fmt.Sprintf("%s.%s", pkgName, typeName))
}
opp := pp.ensurePackageParsed(importPath)
tt := opp.lookupType(typeName)
if tt == nil {
logFatalLinef(pp, t.Pos(), "could not find type %q in package %q", typeName, importPath)
}
ensureNamedType(tt, sec, opp)
return typeName
}
type replacement struct {
original string // a Go type, eg "pkg.Type" or "*pkg.Type"
target typewords
}
var _replacements []replacement
func typeReplacements() []replacement {
if _replacements != nil {
return _replacements
}
_replacements = []replacement{}
for _, repl := range strings.Split(*replace, ",") {
if repl == "" {
continue
}
tokens := strings.Split(repl, " ")
if len(tokens) < 2 {
log.Fatalf("bad replacement %q, must have at least two tokens, space-separated", repl)
}
r := replacement{tokens[0], tokens[1:]}
_replacements = append(_replacements, r)
}
return _replacements
}
// Use of a type Name from package Ppkg. Used to look up references from that
// location (the file where the type is defined, with its imports) for a given Go
// ast.
type useSrc struct {
Ppkg *parsedPackage
Name string
}
func (u useSrc) lookupPackageImportPath(pkgName string) string {
return u.Ppkg.lookupPackageImportPath(u.Name, pkgName)
}
func (u useSrc) String() string {
return fmt.Sprintf("%s.%s", u.Ppkg.Path, u.Name)
}
func (u useSrc) Fatalf(tok token.Pos, format string, args ...interface{}) {
logFatalLinef(u.Ppkg, tok, format, args...)
}
// Return a go type name, eg "*time.Time".
// This function does not parse the types itself, because it would mean they could
// be added to the sherpadoc output even if they aren't otherwise used (due to
// replacement).
func goTypeName(u useSrc, e ast.Expr) string {
switch t := e.(type) {
case *ast.Ident:
return t.Name
case *ast.ArrayType:
return "[]" + goTypeName(u, t.Elt)
case *ast.Ellipsis:
// Ellipsis parameters to a function must be passed as an array, so document it that way.
return "[]" + goTypeName(u, t.Elt)
case *ast.MapType:
return fmt.Sprintf("map[%s]%s", goTypeName(u, t.Key), goTypeName(u, t.Value))
case *ast.InterfaceType:
return "interface{}"
case *ast.StarExpr:
return "*" + goTypeName(u, t.X)
case *ast.SelectorExpr:
packageIdent, ok := t.X.(*ast.Ident)
if !ok {
u.Fatalf(t.Pos(), "unexpected non-ident for SelectorExpr.X")
}
pkgName := packageIdent.Name
typeName := t.Sel.Name
importPath := u.lookupPackageImportPath(pkgName)
if importPath != "" {
return fmt.Sprintf("%s.%s", importPath, typeName)
}
return fmt.Sprintf("%s.%s", pkgName, typeName)
// todo: give proper error message for *ast.StructType
}
u.Fatalf(e.Pos(), "unimplemented ast.Expr %#v in goTypeName", e)
return ""
}
func checkReplacedType(u useSrc, e ast.Expr) typewords {
repls := typeReplacements()
if len(repls) == 0 {
return nil
}
name := goTypeName(u, e)
return replacementType(repls, name)
}
func replacementType(repls []replacement, name string) typewords {
for _, repl := range repls {
if repl.original == name {
return repl.target
}
}
return nil
}
// Ensures the package for importPath has been parsed at least once, and returns it.
func (pp *parsedPackage) ensurePackageParsed(importPath string) *parsedPackage {
r := pp.Imports[importPath]
if r != nil {
return r
}
var localPath string
var astPkg *ast.Package
var fset *token.FileSet
// If dependencies are vendored, we load packages from vendor/. This is typically
// faster than using package.Load (the fallback), which may spawn commands.
// For me, while testing, loading a simple package from the same module went
// from 50-100ms to 1-5ms, and loading "net" from 200ms to 65ms.
if gomodFile != nil {
if importPath == gomodFile.Module.Mod.Path {
localPath = gomodDir
} else if strings.HasPrefix(importPath, gomodFile.Module.Mod.Path+"/") {
localPath = filepath.Join(gomodDir, strings.TrimPrefix(importPath, gomodFile.Module.Mod.Path+"/"))
} else {
p := filepath.Join(gomodDir, "vendor", importPath)
if _, err := os.Stat(p); err == nil {
localPath = p
} else {
localPath = filepath.Join(runtime.GOROOT(), "src", importPath)
}
}
fset = token.NewFileSet()
astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments|parser.DeclarationErrors)
check(err, "parsing go files from "+localPath)
for name, pkg := range astPkgs {
if strings.HasSuffix(name, "_test") {
continue
}
if astPkg != nil {
log.Fatalf("loading package %q: multiple packages found", importPath)
}
astPkg = pkg
}
} else {
config := &packages.Config{
Mode: packages.NeedName | packages.NeedFiles,
}
pkgs, err := packages.Load(config, importPath)
check(err, "loading package")
if len(pkgs) != 1 {
log.Fatalf("loading package %q: got %d packages, expected 1", importPath, len(pkgs))
}
pkg := pkgs[0]
if len(pkg.GoFiles) == 0 {
log.Fatalf("loading package %q: no go files found", importPath)
}
fset = token.NewFileSet()
localPath = filepath.Dir(pkg.GoFiles[0])
astPkgs, err := parser.ParseDir(fset, localPath, nil, parser.ParseComments)
check(err, "parsing go files from directory")
var ok bool
astPkg, ok = astPkgs[pkg.Name]
if !ok {
log.Fatalf("loading package %q: could not find astPkg for %q", importPath, pkg.Name)
}
}
docpkg := doc.New(astPkg, "", doc.AllDecls|doc.PreserveAST)
npp := &parsedPackage{
Fset: fset,
Path: localPath,
Pkg: astPkg,
Docpkg: docpkg,
Imports: make(map[string]*parsedPackage),
}
pp.Imports[importPath] = npp
return npp
}
// LookupPackageImportPath returns the import/package path for pkgName as used
// in the type named typeName.
func (pp *parsedPackage) lookupPackageImportPath(typeName, pkgName string) string {
file := pp.lookupTypeFile(typeName)
for _, imp := range file.Imports {
if imp.Name != nil && imp.Name.Name == pkgName || imp.Name == nil && (parseStringLiteral(imp.Path.Value) == pkgName || strings.HasSuffix(parseStringLiteral(imp.Path.Value), "/"+pkgName)) {
return parseStringLiteral(imp.Path.Value)
}
}
return ""
}
// LookupTypeFile returns the go source file that contains the definition of the type named typeName.
func (pp *parsedPackage) lookupTypeFile(typeName string) *ast.File {
for _, file := range pp.Pkg.Files {
for _, decl := range file.Decls {
switch d := decl.(type) {
case *ast.GenDecl:
for _, spec := range d.Specs {
switch s := spec.(type) {
case *ast.TypeSpec:
if s.Name.Name == typeName {
return file
}
}
}
}
}
}
log.Fatalf("could not find type %q", fmt.Sprintf("%s.%s", pp.Path, typeName))
return nil
}
// Populate "params" with the arguments from "fields", which are function parameters or return type.
func parseArgs(params *[]sherpadoc.Arg, fields *ast.FieldList, sec *section, pp *parsedPackage, isParams bool) {
if fields == nil {
return
}
addParam := func(name string, tw typewords) {
param := sherpadoc.Arg{Name: name, Typewords: tw}
*params = append(*params, param)
}
for _, f := range fields.List {
typ := parseArgType(f.Type, sec, pp)
// Handle named params. Can be both arguments to a function or return types.
for _, name := range f.Names {
addParam(name.Name, typ)
}
// Return types often don't have a name, don't forget them.
if len(f.Names) == 0 {
addParam("", typ)
}
}
for i, p := range *params {
if p.Typewords[len(p.Typewords)-1] != "error" {
continue
}
if isParams || i != len(*params)-1 {
logFatalLinef(pp, fields.Pos(), "can only have error type as last return value")
}
pp := *params
*params = pp[:len(pp)-1]
}
}
func adjustFunctionName(s string) string {
switch *adjustFunctionNames {
case "":
return strings.ToLower(s[:1]) + s[1:]
case "none":
return s
case "lowerWord":
r := ""
for i, c := range s {
lc := unicode.ToLower(c)
if lc == c {
r += s[i:]
break
}
r += string(lc)
}
return r
default:
panic(fmt.Sprintf("bad value for flag adjust-function-names: %q", *adjustFunctionNames))
}
}
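A behavior sketch of the three modes on hypothetical input names, derived by tracing the cases above:
package example

// Sketch (hypothetical names):
//
//	name          ""  (default)   "lowerWord"    "none"
//	"GetID"       "getID"         "getID"        "GetID"
//	"HTTPStatus"  "hTTPStatus"    "httpstatus"   "HTTPStatus"
//
// With "lowerWord", the whole leading run of upper-case letters is lower-cased.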
// ParseMethod ensures the function fn from package pp ends up in section sec, with parameters/return named types filled in.
func parseMethod(sec *section, fn *doc.Func, pp *parsedPackage) {
f := &function{
Name: adjustFunctionName(fn.Name),
Text: fn.Doc,
Params: []sherpadoc.Arg{},
Returns: []sherpadoc.Arg{},
}
// If the first function parameter is context.Context, we skip it in the documentation.
// The sherpa handler automatically fills it with the http request context when called.
params := fn.Decl.Type.Params
if params != nil && len(params.List) > 0 && len(params.List[0].Names) == 1 && goTypeName(useSrc{pp, sec.Name}, params.List[0].Type) == "context.Context" {
params.List = params.List[1:]
}
isParams := true
parseArgs(&f.Params, params, sec, pp, isParams)
isParams = false
parseArgs(&f.Returns, fn.Decl.Type.Results, sec, pp, isParams)
sec.Functions = append(sec.Functions, f)
}
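A hypothetical section method showing the two conventions parseMethod applies: the leading context.Context parameter is hidden, and the trailing error return is stripped (unnamed returns are later given names r0, r1, ...):
package example

import "context"

type User struct{ Name string }

// API is a hypothetical sherpa section struct.
type API struct{}

// UserList returns the users in a group. Documented with param "groupID"
// and return "r0"; the ctx parameter and the error return are both omitted.
func (API) UserList(ctx context.Context, groupID int64) ([]User, error) {
	return nil, nil
}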

85
vendor/github.com/mjl-/sherpadoc/cmd/sherpadoc/sherpa.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
package main
import (
"fmt"
"strings"
"github.com/mjl-/sherpadoc"
)
func sherpaSection(sec *section) *sherpadoc.Section {
doc := &sherpadoc.Section{
Name: sec.Name,
Docs: sec.Text,
Functions: []*sherpadoc.Function{},
Sections: []*sherpadoc.Section{},
Structs: []sherpadoc.Struct{},
Ints: []sherpadoc.Ints{},
Strings: []sherpadoc.Strings{},
}
for _, t := range sec.Types {
switch t.Kind {
case typeStruct:
tt := sherpadoc.Struct{
Name: t.Name,
Docs: t.Text,
Fields: []sherpadoc.Field{},
}
for _, f := range t.Fields {
ff := sherpadoc.Field{
Name: f.Name,
Docs: f.Doc,
Typewords: f.Typewords,
}
tt.Fields = append(tt.Fields, ff)
}
doc.Structs = append(doc.Structs, tt)
case typeInts:
e := sherpadoc.Ints{
Name: t.Name,
Docs: strings.TrimSpace(t.Text),
Values: t.IntValues,
}
doc.Ints = append(doc.Ints, e)
case typeStrings:
e := sherpadoc.Strings{
Name: t.Name,
Docs: strings.TrimSpace(t.Text),
Values: t.StringValues,
}
doc.Strings = append(doc.Strings, e)
case typeBytes:
// todo: hack. find proper way to document them. better for larger functionality: add generic support for lists of types. for now we'll fake this being a string...
e := sherpadoc.Strings{
Name: t.Name,
Docs: strings.TrimSpace(t.Text),
Values: []struct{Name string; Value string; Docs string}{},
}
doc.Strings = append(doc.Strings, e)
default:
panic("missing case")
}
}
for _, fn := range sec.Functions {
// Ensure returns always have a name. Go can leave them nameless.
// Either they all have names or they don't, so the names we make up will never clash.
for i := range fn.Returns {
if fn.Returns[i].Name == "" {
fn.Returns[i].Name = fmt.Sprintf("r%d", i)
}
}
f := &sherpadoc.Function{
Name: fn.Name,
Docs: strings.TrimSpace(fn.Text),
Params: fn.Params,
Returns: fn.Returns,
}
doc.Functions = append(doc.Functions, f)
}
for _, subsec := range sec.Sections {
doc.Sections = append(doc.Sections, sherpaSection(subsec))
}
doc.Docs = strings.TrimSpace(doc.Docs)
return doc
}

84
vendor/github.com/mjl-/sherpadoc/sherpadoc.go generated vendored Normal file
View File

@ -0,0 +1,84 @@
// Package sherpadoc contains types for reading and writing documentation for sherpa APIs.
package sherpadoc
const (
// SherpadocVersion is the sherpadoc version generated by this command.
SherpadocVersion = 1
)
// Section represents documentation about a Sherpa API section, as returned by the "_docs" function.
type Section struct {
Name string // Name of an API section.
Docs string // Explanation of the API in text or markdown.
Functions []*Function // Functions in this section.
Sections []*Section // Subsections, each with their own documentation.
Structs []Struct // Structs as named types.
Ints []Ints // Int enums as named types.
Strings []Strings // String enums used as named types.
Version string `json:",omitempty"` // Version of this API, only relevant for the top-level section of an API. Typically filled in by server at startup.
SherpaVersion int // Version of sherpa this API implements. Currently at 0. Typically filled in by server at startup.
SherpadocVersion int `json:",omitempty"` // Version of the sherpadoc format. Currently at 1, the first defined version. Only relevant for the top-level section of an API.
}
// Function contains the documentation for a single function.
type Function struct {
Name string // Name of the function.
Docs string // Text or markdown, describing the function, its parameters, return types and possible errors.
Params []Arg
Returns []Arg
}
// Arg is the name and type of a function parameter or return value.
//
// Production rules:
//
// basictype := "bool" | "int8" | "uint8" | "int16" | "uint16" | "int32" | "uint32" | "int64" | "uint64" | "int64s" | "uint64s" | "float32" | "float64" | "string" | "timestamp"
// array := "[]"
// map := "{}"
// identifier := [a-zA-Z][a-zA-Z0-9]*
// type := "nullable"? ("any" | basictype | identifier | array type | map type)
//
// It is not possible to have inline structs in an Arg. Those must be encoded as a
// named type.
type Arg struct {
Name string // Name of the argument.
Typewords []string // Typewords is an array of tokens describing the type.
}
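A worked example: under these production rules and the parsing shown earlier, the hypothetical Go type []map[string]*Item becomes the typeword sequence below.
package example

import "github.com/mjl-/sherpadoc"

// Typewords for the hypothetical Go type []map[string]*Item:
// array, then map, then nullable (pointer), then the named type.
var itemsArg = sherpadoc.Arg{
	Name:      "items",
	Typewords: []string{"[]", "{}", "nullable", "Item"},
}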
// Struct is a named compound type.
type Struct struct {
Name string
Docs string
Fields []Field
}
// Field is a single field of a struct type.
// The type can reference another named type.
type Field struct {
Name string
Docs string
Typewords []string
}
// Ints is a type representing an enum with integers as values.
type Ints struct {
Name string
Docs string
Values []struct {
Name string
Value int
Docs string
}
}
// Strings is a type representing an enum with strings as values.
type Strings struct {
Name string
Docs string
Values []struct {
Name string
Value string
Docs string
}
}

8
vendor/github.com/mjl-/sherpaprom/LICENSE.md generated vendored Normal file
View File

@ -0,0 +1,8 @@
Copyright 2017 Irias Informatiemanagement
Copyright 2019 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

13
vendor/github.com/mjl-/sherpaprom/README.md generated vendored Normal file
View File

@ -0,0 +1,13 @@
# sherpaprom
Go package with a Prometheus [1] collector for Sherpa APIs [2,3]. It provides a collector implementing the Collector interface from the sherpa package.
Read the godoc documentation at https://godoc.org/github.com/mjl-/sherpaprom
[1] Prometheus: https://prometheus.io/
[2] Sherpa protocol: https://www.ueber.net/who/mjl/sherpa/
[3] Sherpa Go package: https://github.com/mjl-/sherpa
# LICENSE
Created by Mechiel Lukkien, originally at Irias, and released under an MIT-license, see LICENSE.md.

123
vendor/github.com/mjl-/sherpaprom/collector.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
// Package sherpaprom provides a collector of statistics for incoming Sherpa requests, exported to Prometheus.
package sherpaprom
import (
"github.com/prometheus/client_golang/prometheus"
)
// Collector implements the Collector interface from the sherpa package.
type Collector struct {
requests, errors *prometheus.CounterVec
protocolErrors, badFunction, javascript, json prometheus.Counter
requestDuration *prometheus.HistogramVec
}
// NewCollector creates a new collector for the named API.
// Metrics will be labeled with "api".
// The following prometheus metrics are automatically registered on reg, or the default prometheus registerer if reg is nil:
//
// sherpa_requests_total
// calls, per function
// sherpa_errors_total
// error responses, per function,code
// sherpa_protocol_errors_total
// incorrect requests
// sherpa_bad_function_total
// unknown functions called
// sherpa_javascript_request_total
// requests to sherpa.js
// sherpa_json_requests_total
// requests to sherpa.json
// sherpa_requests_duration_seconds
// histogram for .01, .05, .1, .2, .5, 1, 2, 4, 8, 16, per function
func NewCollector(api string, reg prometheus.Registerer) (*Collector, error) {
if reg == nil {
reg = prometheus.DefaultRegisterer
}
apiLabel := prometheus.Labels{"api": api}
c := &Collector{
requests: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "sherpa_requests_total",
Help: "Total sherpa requests.",
ConstLabels: apiLabel,
}, []string{"function"}),
errors: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "sherpa_errors_total",
Help: "Total sherpa error responses.",
ConstLabels: apiLabel,
}, []string{"function", "code"}),
protocolErrors: prometheus.NewCounter(prometheus.CounterOpts{
Name: "sherpa_protocol_errors_total",
Help: "Total sherpa protocol errors.",
ConstLabels: apiLabel,
}),
badFunction: prometheus.NewCounter(prometheus.CounterOpts{
Name: "sherpa_bad_function_total",
Help: "Total sherpa bad function calls.",
ConstLabels: apiLabel,
}),
javascript: prometheus.NewCounter(prometheus.CounterOpts{
Name: "sherpa_javascript_request_total",
Help: "Total sherpa.js requests.",
ConstLabels: apiLabel,
}),
json: prometheus.NewCounter(prometheus.CounterOpts{
Name: "sherpa_json_requests_total",
Help: "Total sherpa.json requests.",
ConstLabels: apiLabel,
}),
requestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
Name: "sherpa_requests_duration_seconds",
Help: "Sherpa request duration in seconds.",
ConstLabels: apiLabel,
Buckets: []float64{.01, .05, .1, .2, .5, 1, 2, 4, 8, 16},
}, []string{"function"}),
}
first := func(errors ...error) error {
for _, err := range errors {
if err != nil {
return err
}
}
return nil
}
err := first(
reg.Register(c.requests),
reg.Register(c.errors),
reg.Register(c.protocolErrors),
reg.Register(c.badFunction),
reg.Register(c.javascript),
reg.Register(c.json),
reg.Register(c.requestDuration),
)
return c, err
}
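A minimal usage sketch; passing nil uses the default prometheus registerer, and the handler wiring (not shown) is assumed to live elsewhere:
package main

import (
	"log"

	"github.com/mjl-/sherpaprom"
)

func main() {
	// nil registerer: metrics are registered on the default prometheus registerer.
	collector, err := sherpaprom.NewCollector("myapi", nil)
	if err != nil {
		log.Fatalf("registering sherpa metrics: %v", err)
	}
	// Hand collector to the sherpa HTTP handler options so it records
	// calls, errors and durations (handler wiring not shown here).
	_ = collector
}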
// BadFunction increases counter "sherpa_bad_function_total" by one.
func (c *Collector) BadFunction() {
c.badFunction.Inc()
}
// ProtocolError increases counter "sherpa_protocol_errors_total" by one.
func (c *Collector) ProtocolError() {
c.protocolErrors.Inc()
}
// JSON increases "sherpa_json_requests_total" by one.
func (c *Collector) JSON() {
c.json.Inc()
}
// JavaScript increases "sherpa_javascript_request_total" by one.
func (c *Collector) JavaScript() {
c.javascript.Inc()
}
// FunctionCall increases "sherpa_requests_total" by one, adds the call duration to "sherpa_requests_duration_seconds", and increases "sherpa_errors_total" when errorCode is non-empty.
func (c *Collector) FunctionCall(name string, duration float64, errorCode string) {
c.requests.WithLabelValues(name).Inc()
if errorCode != "" {
c.errors.WithLabelValues(name, errorCode).Inc()
}
c.requestDuration.WithLabelValues(name).Observe(duration)
}

1
vendor/github.com/mjl-/xfmt/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
/xfmt

7
vendor/github.com/mjl-/xfmt/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2019 Mechiel Lukkien
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

26
vendor/github.com/mjl-/xfmt/README.txt generated vendored Normal file
View File

@ -0,0 +1,26 @@
xfmt formats long lines, playing nice with text in code.
To install:
go get github.com/mjl-/xfmt/cmd/xfmt
Xfmt reads from stdin, writes formatted output to stdout.
Xfmt wraps long lines at 80 characters, configurable through -width, but it
counts text width excluding indenting and markup. Fmt wraps to a max line
length that includes indenting; what matters for readability is the text
width of a paragraph, which stays the same regardless of indent.
Xfmt recognizes lines with first non-whitespace of "//" and "#" as line
comments, and repeats that prefix on later lines.
Xfmt does not merge lines if the first non-prefix text starts with
punctuation or numbers, e.g. "- item1" or "1. point 1".
Xfmt does not merge multiple spaces; it assumes you intended what you typed.
# todo
- possibly recognize itemized lists in comments and indent the later lines with whitespace
- something else

207
vendor/github.com/mjl-/xfmt/xfmt.go generated vendored Normal file
View File

@ -0,0 +1,207 @@
// Package xfmt reformats text, wrapping it while recognizing comments.
package xfmt
import (
"bufio"
"fmt"
"io"
"strings"
)
// Config tells Format how to reformat text.
type Config struct {
MaxWidth int // Max width of content (excluding indenting), after which lines are wrapped.
BreakPrefixes []string // String prefixes that cause a line to break, instead of being merged into the previous line.
}
// Format reads text from r and writes reformatted text to w, according to
// instructions in config. Lines ending with \r\n are formatted with \r\n as well.
func Format(w io.Writer, r io.Reader, config Config) error {
f := &formatter{
in: bufio.NewReader(r),
out: bufio.NewWriter(w),
config: config,
}
return f.format()
}
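A minimal usage sketch of Format; the input text and widths below are made up:
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/mjl-/xfmt"
)

func main() {
	in := "// A long comment line that will be wrapped while keeping the // prefix on continuation lines."
	var out bytes.Buffer
	err := xfmt.Format(&out, strings.NewReader(in), xfmt.Config{
		MaxWidth:      40,
		BreakPrefixes: []string{"- "},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(out.String())
}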
type formatter struct {
in *bufio.Reader
out *bufio.Writer
config Config
curLine string
curLineend string
}
type parseError error
func (f *formatter) format() (rerr error) {
defer func() {
e := recover()
if e != nil {
if pe, ok := e.(parseError); ok {
rerr = pe
} else {
panic(e)
}
}
}()
for {
line, end := f.gatherLine()
if line == "" && end == "" {
break
}
prefix, rem := parseLine(line)
for _, s := range f.splitLine(rem) {
f.write(prefix)
f.write(s)
f.write(end)
}
}
return f.out.Flush()
}
func (f *formatter) check(err error, action string) {
if err != nil {
panic(parseError(fmt.Errorf("%s: %s", action, err)))
}
}
func (f *formatter) write(s string) {
_, err := f.out.Write([]byte(s))
f.check(err, "write")
}
func (f *formatter) peekLine() (string, string) {
if f.curLine != "" || f.curLineend != "" {
return f.curLine, f.curLineend
}
line, err := f.in.ReadString('\n')
if err != io.EOF {
f.check(err, "read")
}
if line == "" {
return "", ""
}
if strings.HasSuffix(line, "\r\n") {
f.curLine, f.curLineend = line[:len(line)-2], "\r\n"
} else if strings.HasSuffix(line, "\n") {
f.curLine, f.curLineend = line[:len(line)-1], "\n"
} else {
f.curLine, f.curLineend = line, ""
}
return f.curLine, f.curLineend
}
func (f *formatter) consumeLine() {
if f.curLine == "" && f.curLineend == "" {
panic("bad")
}
f.curLine = ""
f.curLineend = ""
}
func (f *formatter) gatherLine() (string, string) {
var curLine, curLineend string
var curPrefix string
for {
line, end := f.peekLine()
if line == "" && end == "" {
break
}
if curLine == "" {
curLineend = end
}
prefix, rem := parseLine(line)
if prefix == "" && rem == "" {
if curLine == "" {
f.consumeLine()
}
break
}
if curLine != "" && (curPrefix != prefix || rem == "" || f.causeBreak(rem)) {
break
}
curPrefix = prefix
if curLine != "" {
curLine += " "
}
curLine += rem
f.consumeLine()
// Lines ending in a control character are not merged with the next line.
if curLine != "" && curLine[len(curLine)-1] < 0x20 {
break
}
}
return curPrefix + curLine, curLineend
}
func (f *formatter) causeBreak(s string) bool {
c := s[0]
if c < 0x20 {
return true
}
for _, ss := range f.config.BreakPrefixes {
if strings.HasPrefix(s, ss) {
return true
}
}
// Don't merge lines starting with eg "1. ".
for i, c := range s {
if c >= '0' && c <= '9' {
continue
}
if i > 0 && c == '.' && strings.HasPrefix(s[i:], ". ") {
return true
}
break
}
return false
}
func parseLine(s string) (string, string) {
orig := s
s = strings.TrimLeft(orig, " \t")
prefix := orig[:len(orig)-len(s)]
if strings.HasPrefix(s, "//") {
prefix += "//"
s = s[2:]
} else if strings.HasPrefix(s, "#") {
prefix += "#"
s = s[1:]
}
ns := strings.TrimLeft(s, " \t")
prefix += s[:len(s)-len(ns)]
s = ns
return prefix, s
}
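// splitLine wraps s into chunks of at most MaxWidth characters, breaking
// only at spaces. A single word longer than MaxWidth is kept intact on its
// own line.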
func (f *formatter) splitLine(s string) []string {
if len(s) <= f.config.MaxWidth {
return []string{s}
}
line := ""
r := []string{}
for _, w := range strings.Split(s, " ") {
if line != "" && len(line)+1+len(w) > f.config.MaxWidth {
r = append(r, line)
line = w
continue
}
if line != "" {
line += " "
}
line += w
}
if line != "" {
r = append(r, line)
}
return r
}

vendor/github.com/prometheus/client_golang/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/prometheus/client_golang/NOTICE generated vendored Normal file

@ -0,0 +1,23 @@
Prometheus instrumentation library for Go applications
Copyright 2012-2015 The Prometheus Authors
This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
See https://github.com/beorn7/perks/blob/master/README.md for license details.
Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
Support for streaming Protocol Buffer messages for the Go language (golang).
https://github.com/matttproud/golang_protobuf_extensions
Copyright 2013 Matt T. Proud
Licensed under the Apache License, Version 2.0


@ -0,0 +1 @@
command-line-arguments.test


@ -0,0 +1 @@
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).


@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "runtime/debug"
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}
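// As a hedged usage sketch (the registry variable is illustrative), new code
// should prefer the collectors sub-package:
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(collectors.NewBuildInfoCollector())
//
// This assumes importing
// github.com/prometheus/client_golang/prometheus/collectors.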


@ -0,0 +1,128 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
// documentation.
//
// It is valid if one and the same Collector sends duplicate
// descriptors. Those duplicates are simply ignored. However, two
// different Collectors must not send duplicate descriptors.
//
// Sending no descriptor at all marks the Collector as “unchecked”,
// i.e. no checks will be performed at registration time, and the
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
// lifetime of the Collector. It may be called concurrently and
// therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
// signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by Describe
// (unless the Collector is unchecked, see above). Returned metrics that
// share the same descriptor must differ in their variable label
// values.
//
// This method may be called concurrently and must therefore be
// implemented in a concurrency safe way. Blocking occurs at the expense
// of total performance of rendering all registered metrics. Ideally,
// Collector implementations support concurrent readers.
Collect(chan<- Metric)
}
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
// func (c customCollector) Describe(ch chan<- *Desc) {
// DescribeByCollect(c, ch)
// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
metrics := make(chan Metric)
go func() {
c.Collect(metrics)
close(metrics)
}()
for m := range metrics {
descs <- m.Desc()
}
}
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}
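// As a hedged illustration (all names below are made up, not part of this
// package), a custom Collector that mirrors an externally maintained number
// and reuses DescribeByCollect for its Describe method could look like:
//
//	type queueCollector struct {
//		desc     *prometheus.Desc
//		queueLen func() float64
//	}
//
//	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
//		prometheus.DescribeByCollect(c, ch)
//	}
//
//	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
//		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.queueLen())
//	}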


@ -0,0 +1,328 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
Metric
Collector
// Inc increments the counter by 1. Use Add to increment it by arbitrary
// non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
Add(float64)
}
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 128 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
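// As a hedged usage sketch (the counter and label values are illustrative),
// a caller can attach an exemplar when the implementation supports it:
//
//	if ea, ok := requestsTotal.(ExemplarAdder); ok {
//		ea.AddWithExemplar(1, Labels{"trace_id": "abc123"})
//	}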
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
// perform the corresponding type assertion.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
// as a uint64. This allows atomic increments of the counter with optimal
// performance. (It is common to have an Inc call in very hot execution paths.)
// Both internal tracking values are added up in the Write method. This has to
// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
result.init(result) // Init self-collection.
return result
}
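// A minimal usage sketch (the metric name and values are illustrative):
//
//	requests := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "myapp_requests_total",
//		Help: "Total number of handled requests.",
//	})
//	prometheus.MustRegister(requests)
//	requests.Inc()
//	requests.Add(3)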
type counter struct {
// valBits contains the bits of the represented float64 value, while
// valInt stores values that are exact integers. Both have to go first
// in the struct to guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
valInt uint64
selfCollector
desc *Desc
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (c *counter) Desc() *Desc {
return c.desc
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
return
}
for {
oldBits := atomic.LoadUint64(&c.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
return
}
}
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
c.Add(v)
c.updateExemplar(v, e)
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
return fval + float64(ival)
}
func (c *counter) Write(out *dto.Metric) error {
// Read the Exemplar first and the value second. This is to avoid a race condition
// where users see an exemplar for a not-yet-existing observation.
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
val := c.get()
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
func (c *counter) updateExemplar(v float64, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, c.now(), l)
if err != nil {
panic(err)
}
c.exemplar.Store(e)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
}
}
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
// create the new Counter but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Counter for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Counter from the CounterVec. In that case,
// the Counter will still exist, but it will not be exported anymore, even if a
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//
// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return c
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. Not returning an error allows shortcuts like
//
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
c, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return c
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the CounterVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
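// As a hedged sketch of currying (metric and label names are illustrative):
//
//	httpReqs := prometheus.NewCounterVec(
//		prometheus.CounterOpts{Name: "http_requests_total", Help: "Handled HTTP requests."},
//		[]string{"handler", "code"},
//	)
//	loginReqs := httpReqs.MustCurryWith(prometheus.Labels{"handler": "login"})
//	loginReqs.WithLabelValues("200").Inc() // Only "code" remains variable.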
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
Metric
Collector
}
// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), CounterValue, function)
}
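// As a hedged usage sketch (the dropped counter is illustrative), mirroring
// an externally maintained count:
//
//	var dropped uint64 // Updated elsewhere with atomic.AddUint64.
//
//	droppedTotal := prometheus.NewCounterFunc(prometheus.CounterOpts{
//		Name: "myapp_dropped_events_total",
//		Help: "Events dropped, read from an external counter.",
//	}, func() float64 {
//		return float64(atomic.LoadUint64(&dropped))
//	})
//	prometheus.MustRegister(droppedTotal)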


@ -0,0 +1,189 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/client_golang/prometheus/internal"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in both constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
// fqName has been built from Namespace, Subsystem, and Name.
fqName string
// help provides some helpful information about this metric.
help string
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
id uint64
// dimHash is a hash of the label names (preset and variable) and the
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
// err is an error that occurred during construction. It is reported on
// registration time.
err error
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels,
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
// labelValues contains the label values of const labels (in order of
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, labelName)
labelNameSet[labelName] = struct{}{}
}
sort.Strings(labelNames)
// ... so that we can now add const label values in the order of their names.
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
// Validate the const label values. They can't have a wrong cardinality, so
// use len(labelValues) as expectedNumberOfValues.
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
d.err = err
return d
}
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, "$"+labelName)
labelNameSet[labelName] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = errors.New("duplicate label names")
return d
}
xxh := xxhash.New()
for _, val := range labelValues {
xxh.WriteString(val)
xxh.Write(separatorByteSlice)
}
d.id = xxh.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
xxh.Reset()
xxh.WriteString(help)
xxh.Write(separatorByteSlice)
for _, labelName := range labelNames {
xxh.WriteString(labelName)
xxh.Write(separatorByteSlice)
}
d.dimHash = xxh.Sum64()
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
Name: proto.String(n),
Value: proto.String(v),
})
}
sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
return d
}
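// A hedged usage sketch (all names are illustrative):
//
//	desc := prometheus.NewDesc(
//		prometheus.BuildFQName("myapp", "worker", "tasks_total"),
//		"Tasks processed, partitioned by outcome.",
//		[]string{"outcome"},             // variable labels
//		prometheus.Labels{"shard": "0"}, // const labels
//	)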
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
// provided error set. If a collector returning such a descriptor is registered,
// registration will fail with the provided error. NewInvalidDesc can be used by
// a Collector to signal inability to describe itself.
func NewInvalidDesc(err error) *Desc {
return &Desc{
err: err,
}
}
func (d *Desc) String() string {
lpStrings := make([]string, 0, len(d.constLabelPairs))
for _, lp := range d.constLabelPairs {
lpStrings = append(
lpStrings,
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
)
}


@ -0,0 +1,210 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus is the core instrumentation package. It provides metrics
// primitives to instrument code for monitoring. It also offers a registry for
// metrics. Sub-packages allow exposing the registered metrics via HTTP
// (package promhttp) or push them to a Pushgateway (package push). There is
// also a sub-package promauto, which provides metrics constructors with
// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// # A Basic Example
//
// As a starting point, a very basic usage example:
//
// package main
//
// import (
// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// type metrics struct {
// cpuTemp prometheus.Gauge
// hdFailures *prometheus.CounterVec
// }
//
// func NewMetrics(reg prometheus.Registerer) *metrics {
// m := &metrics{
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// }),
// hdFailures: prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// ),
// }
// reg.MustRegister(m.cpuTemp)
// reg.MustRegister(m.hdFailures)
// return m
// }
//
// func main() {
// // Create a non-global registry.
// reg := prometheus.NewRegistry()
//
// // Create new metrics and register them using the custom registry.
// m := NewMetrics(reg)
// // Set values for the newly created metrics.
// m.cpuTemp.Set(65.3)
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// // Expose metrics and custom registry via an HTTP server
// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
// It registers the metrics using a custom registry and exposes them via an HTTP server
// on the /metrics endpoint.
//
// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. An own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
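//
// As a hedged illustration (the collector type and field names are made up),
// creating such a constant metric in Collect could look like:
//
//	func (c myCollector) Collect(ch chan<- prometheus.Metric) {
//		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.CounterValue, c.readExternalCount())
//	}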
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
// As suggested by the name, MustRegister panics if an error occurs. With the
// Register function, the error is returned and can be handled.
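//
// As a hedged sketch of handling that error (the collector variable is
// illustrative):
//
//	if err := prometheus.Register(myCollector); err != nil {
//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
//			// A Collector with the same ID was registered before.
//			_ = are.ExistingCollector
//		} else {
//			log.Fatal(err)
//		}
//	}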
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data model.
// Inconsistencies are ideally detected at registration time, not at collect
// time. The former will usually be detected at start-up time of a program,
// while the latter will only happen at scrape time, possibly not even on the
// first scrape if the inconsistency only becomes relevant later. That is the
// main reason why a Collector and a Metric have to describe themselves to the
// registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
// on the default registry.
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
// # Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
package prometheus

Some files were not shown because too many files have changed in this diff.