libnetwork/networkdb: add convergence test
Add a property-based test which asserts that a cluster of NetworkDB nodes always eventually converges to a consistent state. As this test takes a long time to run, it is build-tagged so that it is excluded from CI.

Signed-off-by: Cory Snider <csnider@mirantis.com>
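Note: the new test file carries a `//go:build slowtests` constraint, so it only compiles and runs when that build tag is supplied, for example with something like `go test -tags slowtests -run TestNetworkDBAlwaysConverges ./daemon/libnetwork/networkdb/` (command assumed here, not part of the commit).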
@@ -3,7 +3,9 @@ package networkdb
import (
	"context"
	"encoding/hex"
	"fmt"
	"os"
	"strings"

	"github.com/containerd/log"
)

@@ -43,3 +45,15 @@ func logEncKeys(ctx context.Context, keys ...[]byte) {
		}
	}
}

func (nDB *NetworkDB) DebugDumpTable(tname string) string {
	nDB.RLock()
	root := nDB.indexes[byTable].Root()
	nDB.RUnlock()
	var sb strings.Builder
	root.WalkPrefix([]byte("/"+tname), func(path []byte, v *entry) bool {
		fmt.Fprintf(&sb, " %q: %+v\n", path, v)
		return false
	})
	return sb.String()
}
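The new `DebugDumpTable` helper is consumed by the property test below to dump a table's radix-tree contents when convergence stalls. As a minimal illustration of how it might be called from a test (a sketch only; `logTableContents` is hypothetical, and `some_table` is simply the table name the property test happens to use):

```go
// logTableContents writes the current contents of one NetworkDB table to the
// test log, using the DebugDumpTable helper added above.
func logTableContents(t *testing.T, nDB *NetworkDB) {
	t.Helper()
	t.Logf("contents of some_table:\n%s", nDB.DebugDumpTable("some_table"))
}
```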
daemon/libnetwork/networkdb/networkdb_property_test.go (new file, 287 lines)
@@ -0,0 +1,287 @@
//go:build slowtests

package networkdb

import (
	"fmt"
	"maps"
	"os"
	"slices"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"gotest.tools/v3/poll"
	"pgregory.net/rapid"
)

func TestNetworkDBAlwaysConverges(t *testing.T) {
	rapid.Check(t, testConvergence)
}

func testConvergence(t *rapid.T) {
	numNodes := rapid.IntRange(2, 25).Draw(t, "numNodes")
	numNetworks := rapid.IntRange(1, 5).Draw(t, "numNetworks")

	fsm := &networkDBFSM{
		nDB:      createNetworkDBInstances(t, numNodes, "node", DefaultConfig()),
		state:    make([]map[string]map[string]string, numNodes),
		keysUsed: make(map[string]map[string]bool),
	}
	defer closeNetworkDBInstances(t, fsm.nDB)
	for i := range fsm.state {
		fsm.state[i] = make(map[string]map[string]string)
	}
	for i := range numNetworks {
		nw := "nw" + strconv.Itoa(i)
		fsm.networks = append(fsm.networks, nw)
		fsm.keysUsed[nw] = make(map[string]bool)
	}
	// Drive the NetworkDB instances with a sequence of actions in random order.
	// We do not check for convergence until afterwards as NetworkDB is an
	// eventually consistent system.
	t.Repeat(rapid.StateMachineActions(fsm))

	// Take the union of all entries in all networks owned by all nodes.
	converged := make(map[string]map[string]string)
	for _, state := range fsm.state {
		for network, entries := range state {
			if converged[network] == nil {
				converged[network] = make(map[string]string)
			}
			maps.Copy(converged[network], entries)
		}
	}
	expected := make(map[string]map[string]map[string]string, numNodes)
	for i, st := range fsm.state {
		exp := make(map[string]map[string]string)
		for k := range st {
			exp[k] = converged[k]
		}
		expected[fsm.nDB[i].config.NodeID] = exp
	}

	t.Logf("Waiting for NetworkDB state to converge to %#v", converged)
	for i, st := range fsm.state {
		t.Logf("Node #%d (%s): %v", i, fsm.nDB[i].config.NodeID, slices.Collect(maps.Keys(st)))
	}
	t.Log("Mutations:")
	for _, m := range fsm.mutations {
		t.Log(m)
	}
	t.Log("---------------------------")

	poll.WaitOn(t, func(t poll.LogT) poll.Result {
		actualState := make(map[string]map[string]map[string]string, numNodes)
		for _, nDB := range fsm.nDB {
			actual := make(map[string]map[string]string)
			for k, nw := range nDB.thisNodeNetworks {
				if !nw.leaving {
					actual[k] = make(map[string]string)
				}
			}
			actualState[nDB.config.NodeID] = actual
		}
		tableContent := make([]string, len(fsm.nDB))
		for i, nDB := range fsm.nDB {
			tableContent[i] = fmt.Sprintf("Node #%d (%s):\n%v", i, nDB.config.NodeID, nDB.DebugDumpTable("some_table"))
			nDB.WalkTable("some_table", func(network, key string, value []byte, deleting bool) bool {
				if deleting {
					return false
				}
				if actualState[nDB.config.NodeID][network] == nil {
					actualState[nDB.config.NodeID][network] = make(map[string]string)
				}
				actualState[nDB.config.NodeID][network][key] = string(value)
				return false
			})
		}
		diff := cmp.Diff(expected, actualState)
		if diff != "" {
			return poll.Continue("NetworkDB state has not converged:\n%v\n%v", diff, strings.Join(tableContent, "\n\n"))
		}
		return poll.Success()
	}, poll.WithTimeout(5*time.Minute), poll.WithDelay(200*time.Millisecond))

	convergenceTime := time.Since(fsm.lastMutation)
	t.Logf("NetworkDB state converged in %v", convergenceTime)

	// Log the convergence time to disk for later statistical analysis.

	if err := os.Mkdir("testdata", 0755); err != nil && !os.IsExist(err) {
		t.Logf("Could not log convergence time to disk: failed to create testdata directory: %v", err)
		return
	}
	f, err := os.OpenFile("testdata/convergence_time.csv", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		t.Logf("Could not log convergence time to disk: failed to open file: %v", err)
		return
	}
	defer func() {
		if err := f.Close(); err != nil {
			t.Logf("Could not log convergence time to disk: error closing file: %v", err)
		}
	}()
	if st, err := f.Stat(); err != nil {
		t.Logf("Could not log convergence time to disk: failed to stat file: %v", err)
		return
	} else if st.Size() == 0 {
		f.WriteString("Nodes,Networks,#Mutations,Convergence(ns)\n")
	}
	if _, err := fmt.Fprintf(f, "%v,%v,%v,%d\n", numNodes, numNetworks, len(fsm.mutations), convergenceTime); err != nil {
		t.Logf("Could not log convergence time to disk: failed to write to file: %v", err)
		return
	}
}

// networkDBFSM is a [rapid.StateMachine] providing the set of actions available
// for rapid to drive NetworkDB with in tests. See also
// [rapid.StateMachineActions] and [rapid.Repeat].
type networkDBFSM struct {
	nDB      []*NetworkDB
	networks []string // list of networks which can be joined
	// node -> joined-network -> key -> value
	state []map[string]map[string]string

	// Remember entry keys that have been used before to avoid trying to
	// create colliding keys. Due to how quickly the FSM runs, it is
	// possible for a node to not have learned that the previous generation
	// of the key was deleted before we try to create it again.
	// network -> key -> true
	keysUsed map[string]map[string]bool

	// Timestamp of the most recent state-machine action which perturbed the
	// system state.
	lastMutation time.Time
	mutations    []string
}

func (u *networkDBFSM) mutated(nodeidx int, action, network, key, value string) {
	u.lastMutation = time.Now()
	desc := fmt.Sprintf(" [%v] #%d(%v):%v(%s", u.lastMutation, nodeidx, u.nDB[nodeidx].config.NodeID, action, network)
	if key != "" {
		desc += fmt.Sprintf(", %s=%s", key, value)
	}
	desc += ")"
	u.mutations = append(u.mutations, desc)
}

func (u *networkDBFSM) Check(t *rapid.T) {
	// This method is required to implement the [rapid.StateMachine]
	// interface. But there is nothing much to check stepwise as we are
	// testing an eventually consistent system. The checks happen after
	// rapid is done randomly driving the FSM.
}

func (u *networkDBFSM) JoinNetwork(t *rapid.T) {
	// Pick a node that has not joined all networks...
	var nodes []int
	for i, s := range u.state {
		if len(s) < len(u.networks) {
			nodes = append(nodes, i)
		}
	}
	if len(nodes) == 0 {
		t.Skip("All nodes are already joined to all networks")
	}
	nodeidx := rapid.SampledFrom(nodes).Draw(t, "node")

	// ... and a network to join.
	networks := slices.DeleteFunc(slices.Clone(u.networks), func(n string) bool {
		_, ok := u.state[nodeidx][n]
		return ok
	})
	nw := rapid.SampledFrom(networks).Draw(t, "network")

	if err := u.nDB[nodeidx].JoinNetwork(nw); err != nil {
		t.Errorf("Node %v failed to join network %s: %v", nodeidx, nw, err)
	} else {
		u.state[nodeidx][nw] = make(map[string]string)
		u.mutated(nodeidx, "JoinNetwork", nw, "", "")
	}
}

// drawJoinedNodeAndNetwork returns a random node that has joined at least one
// network, along with one of the networks it has joined.
func (u *networkDBFSM) drawJoinedNodeAndNetwork(t *rapid.T) (nodeidx int, nw string) {
	var nodes []int
	for i, s := range u.state {
		if len(s) > 0 {
			nodes = append(nodes, i)
		}
	}
	if len(nodes) == 0 {
		t.Skip("No node is joined to any network")
	}
	nodeidx = rapid.SampledFrom(nodes).Draw(t, "node")

	nw = rapid.SampledFrom(slices.Collect(maps.Keys(u.state[nodeidx]))).Draw(t, "network")
	return nodeidx, nw
}

func (u *networkDBFSM) LeaveNetwork(t *rapid.T) {
	nodeidx, nw := u.drawJoinedNodeAndNetwork(t)
	if err := u.nDB[nodeidx].LeaveNetwork(nw); err != nil {
		t.Errorf("Node %v failed to leave network %s: %v", nodeidx, nw, err)
	} else {
		delete(u.state[nodeidx], nw)
		u.mutated(nodeidx, "LeaveNetwork", nw, "", "")
	}
}

func (u *networkDBFSM) CreateEntry(t *rapid.T) {
	nodeidx, nw := u.drawJoinedNodeAndNetwork(t)
	key := rapid.StringMatching(`[a-z]{3,25}`).
		Filter(func(s string) bool { return !u.keysUsed[nw][s] }).
		Draw(t, "key")
	value := rapid.StringMatching(`[a-z]{5,20}`).Draw(t, "value")

	if err := u.nDB[nodeidx].CreateEntry("some_table", nw, key, []byte(value)); err != nil {
		t.Errorf("Node %v failed to create entry %s=%s in network %s: %v", nodeidx, key, value, nw, err)
	} else {
		u.state[nodeidx][nw][key] = value
		u.keysUsed[nw][key] = true
		u.mutated(nodeidx, "CreateEntry", nw, key, value)
	}
}

// drawOwnedDBKey returns a random key in nw owned by the node at nodeidx.
func (u *networkDBFSM) drawOwnedDBKey(t *rapid.T, nodeidx int, nw string) string {
	keys := slices.Collect(maps.Keys(u.state[nodeidx][nw]))
	if len(keys) == 0 {
		t.Skipf("Node %v owns no entries in network %s", nodeidx, nw)
		panic("unreachable")
	}
	return rapid.SampledFrom(keys).Draw(t, "key")
}

func (u *networkDBFSM) UpdateEntry(t *rapid.T) {
	nodeidx, nw := u.drawJoinedNodeAndNetwork(t)
	key := u.drawOwnedDBKey(t, nodeidx, nw)
	value := rapid.StringMatching(`[a-z]{5,20}`).Draw(t, "value")

	if err := u.nDB[nodeidx].UpdateEntry("some_table", nw, key, []byte(value)); err != nil {
		t.Errorf("Node %v failed to update entry %s=%s in network %s: %v", nodeidx, key, value, nw, err)
	} else {
		u.state[nodeidx][nw][key] = value
		u.mutated(nodeidx, "UpdateEntry", nw, key, value)
	}
}

func (u *networkDBFSM) DeleteEntry(t *rapid.T) {
	nodeidx, nw := u.drawJoinedNodeAndNetwork(t)
	key := u.drawOwnedDBKey(t, nodeidx, nw)

	if err := u.nDB[nodeidx].DeleteEntry("some_table", nw, key); err != nil {
		t.Errorf("Node %v failed to delete entry %s in network %s: %v", nodeidx, key, nw, err)
	} else {
		delete(u.state[nodeidx][nw], key)
		u.mutated(nodeidx, "DeleteEntry", nw, key, "")
	}
}

func (u *networkDBFSM) Sleep(t *rapid.T) {
	duration := time.Duration(rapid.IntRange(10, 500).Draw(t, "duration")) * time.Millisecond
	time.Sleep(duration)
}
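For readers unfamiliar with rapid's state-machine testing, the sketch below (illustrative only, not part of the commit) shows the same pattern `networkDBFSM` relies on: exported methods of the state machine become actions that rapid invokes in random order via `rapid.StateMachineActions` and `t.Repeat`, and `Check` runs after every action.

```go
package example

import (
	"testing"

	"pgregory.net/rapid"
)

// counterFSM is a toy state machine: each exported method is an action that
// rapid may draw at random while driving the test.
type counterFSM struct {
	n int
}

// Increment is one such action.
func (f *counterFSM) Increment(t *rapid.T) { f.n++ }

// Check is required by rapid.StateMachine and runs after every action.
func (f *counterFSM) Check(t *rapid.T) {
	if f.n < 0 {
		t.Fatalf("counter went negative: %d", f.n)
	}
}

func TestCounter(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		t.Repeat(rapid.StateMachineActions(&counterFSM{}))
	})
}
```

In the real test the actions additionally call `t.Skip` when no node is in a suitable state, signalling to rapid that the action is not applicable there; the convergence assertion itself only happens once rapid has finished driving the machine.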
@@ -34,14 +34,20 @@ func TestMain(m *testing.M) {
 	os.Exit(m.Run())
 }
 
-func launchNode(t *testing.T, conf Config) *NetworkDB {
+type TestingT interface {
+	assert.TestingT
+	poll.TestingT
+	Helper()
+}
+
+func launchNode(t TestingT, conf Config) *NetworkDB {
 	t.Helper()
 	db, err := New(&conf)
 	assert.NilError(t, err)
 	return db
 }
 
-func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Config) []*NetworkDB {
+func createNetworkDBInstances(t TestingT, num int, namePrefix string, conf *Config) []*NetworkDB {
 	t.Helper()
 	var dbs []*NetworkDB
 	for i := 0; i < num; i++ {
@@ -69,12 +75,12 @@ func createNetworkDBInstances(t *testing.T, num int, namePrefix string, conf *Co
 		}
 		return poll.Success()
 	}
-	poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second))
+	poll.WaitOn(t, check, poll.WithDelay(2*time.Second), poll.WithTimeout(20*time.Second+time.Duration(num-1)*10*time.Second))
 
 	return dbs
 }
 
-func closeNetworkDBInstances(t *testing.T, dbs []*NetworkDB) {
+func closeNetworkDBInstances(t TestingT, dbs []*NetworkDB) {
 	t.Helper()
 	log.G(context.TODO()).Print("Closing DB instances...")
 	for _, db := range dbs {
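Two notes on the helper changes above. First, the point of the new `TestingT` interface is that the helpers can now be driven both from ordinary tests and from the property-based test: both `*testing.T` and `*rapid.T` satisfy it. A compile-time assertion (illustrative only, assuming the `testing` and `pgregory.net/rapid` imports in the same package) makes that explicit:

```go
// Both *testing.T and *rapid.T provide the assert/poll logging methods plus
// Helper(), so either can be passed to launchNode, createNetworkDBInstances,
// and closeNetworkDBInstances.
var (
	_ TestingT = (*testing.T)(nil)
	_ TestingT = (*rapid.T)(nil)
)
```

Second, the `poll.WaitOn` timeout now scales with cluster size: the existing two-node tests keep 20s + 1×10s = 30 seconds, while the largest property-test cluster (25 nodes) gets 20s + 24×10s = 260 seconds.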
@@ -115,6 +115,7 @@ require (
 	google.golang.org/grpc v1.72.2
 	google.golang.org/protobuf v1.36.6
 	gotest.tools/v3 v3.5.2
+	pgregory.net/rapid v1.2.0
 	resenje.org/singleflight v0.4.3
 	tags.cncf.io/container-device-interface v1.0.1
 )

@@ -840,6 +840,8 @@ kernel.org/pub/linux/libs/security/libcap/cap v1.2.76 h1:mrdLPj8ujM6eIKGtd1PkkuC
 kernel.org/pub/linux/libs/security/libcap/cap v1.2.76/go.mod h1:7V2BQeHnVAQwhCnCPJ977giCeGDiywVewWF+8vkpPlc=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.76 h1:3DyzQ30OHt3wiOZVL1se2g1PAPJIU7+tMUyvfMUj1dY=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.76/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
+pgregory.net/rapid v1.2.0 h1:keKAYRcjm+e1F0oAuU5F5+YPAWcyxNNRK2wud503Gnk=
+pgregory.net/rapid v1.2.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04=
 resenje.org/singleflight v0.4.3 h1:l7foFYg8X/VEHPxWs1K/Pw77807RMVzvXgWGb0J1sdM=
 resenje.org/singleflight v0.4.3/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
vendor/modules.txt (vendored, 3 changes)

@@ -1701,6 +1701,9 @@ k8s.io/klog/v2/internal/dbg
 k8s.io/klog/v2/internal/serialize
 k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
+# pgregory.net/rapid v1.2.0
+## explicit; go 1.18
+pgregory.net/rapid
 # resenje.org/singleflight v0.4.3
 ## explicit; go 1.18
 resenje.org/singleflight
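The `pgregory.net/rapid` entries above (module requirement, checksums, and the `vendor/modules.txt` record), together with the vendored files that follow, are the mechanical result of adding the new dependency: roughly `go get pgregory.net/rapid@v1.2.0` followed by re-vendoring, though the exact commands depend on this repository's vendoring workflow.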
vendor/pgregory.net/rapid/.gitattributes (generated, vendored, new file, 1 line)

@@ -0,0 +1 @@
*.go eol=lf
vendor/pgregory.net/rapid/.gitignore (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
/.idea/
/.vscode/
*.swp

/rapid.test
/rapid.test.exe
/cpu.prof
/mem.prof
/profile*
/testdata/

/doc.html
/vis-*.html
vendor/pgregory.net/rapid/CONTRIBUTING.md (generated, vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
# Issues

Any issues reported are greatly appreciated. Please consider opening an issue
not only in case you have encountered a bug, but if *anything* (be it API,
functionality, workflow, docs, ...) looks like it can be improved.

# Pull requests

Please avoid "improve the code style" kind of pull requests; in particular
`if block ends with a return statement, so drop this else and outdent its block`
suggestion of `golint` should be ignored.

If you intend to work on anything non-trivial, please open an issue first,
to discuss the design and implementation before writing any code.
vendor/pgregory.net/rapid/LICENSE (generated, vendored, new file, 373 lines)
@@ -0,0 +1,373 @@
(Verbatim text of the Mozilla Public License, Version 2.0, the standard license under which rapid is distributed.)
vendor/pgregory.net/rapid/README.md (generated, vendored, new file, 203 lines)
@@ -0,0 +1,203 @@
|
||||
# rapid [![PkgGoDev][godev-img]][godev] [![CI][ci-img]][ci]
|
||||
|
||||
Rapid is a Go library for property-based testing.
|
||||
|
||||
Rapid checks that properties you define hold for a large number
|
||||
of automatically generated test cases. If a failure is found, rapid
|
||||
automatically minimizes the failing test case before presenting it.
|
||||
|
||||
## Features
|
||||
|
||||
- Imperative Go API with type-safe data generation using generics
|
||||
- Data generation biased to explore "small" values and edge cases more thoroughly
|
||||
- Fully automatic minimization of failing test cases
|
||||
- Persistence and automatic re-running of minimized failing test cases
|
||||
- Support for state machine ("stateful" or "model-based") testing
|
||||
- No dependencies outside the Go standard library
|
||||
|
||||
## Examples
|
||||
|
||||
Here is what a trivial test using rapid looks like ([playground](https://go.dev/play/p/QJhOzo_BByz)):
|
||||
|
||||
```go
|
||||
package rapid_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"pgregory.net/rapid"
|
||||
)
|
||||
|
||||
func TestSortStrings(t *testing.T) {
|
||||
rapid.Check(t, func(t *rapid.T) {
|
||||
s := rapid.SliceOf(rapid.String()).Draw(t, "s")
|
||||
sort.Strings(s)
|
||||
if !sort.StringsAreSorted(s) {
|
||||
t.Fatalf("unsorted after sort: %v", s)
|
||||
}
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
More complete examples:
|
||||
|
||||
- `ParseDate` function test:
|
||||
[source code](./example_function_test.go), [playground](https://go.dev/play/p/tZFU8zv8AUl)
|
||||
- `Queue` state machine test:
|
||||
[source code](./example_statemachine_test.go), [playground](https://go.dev/play/p/cxEh4deG-4n)
|
||||
|
||||
## Comparison
|
||||
|
||||
Rapid aims to bring to Go the power and convenience
|
||||
[Hypothesis](https://github.com/HypothesisWorks/hypothesis) brings to Python.
|
||||
|
||||
Compared to [testing.F.Fuzz](https://pkg.go.dev/testing#F.Fuzz), rapid shines
|
||||
in generating complex structured data, including state machine tests, but lacks
|
||||
coverage-guided feedback and mutations. Note that with
|
||||
[`MakeFuzz`](https://pkg.go.dev/pgregory.net/rapid#MakeFuzz), any rapid test
|
||||
can be used as a fuzz target for the standard fuzzer.
|
||||
|
||||
Compared to [gopter](https://pkg.go.dev/github.com/leanovate/gopter), rapid
|
||||
provides a much simpler API (queue test in [rapid](./example_statemachine_test.go) vs
|
||||
[gopter](https://github.com/leanovate/gopter/blob/90cc76d7f1b21637b4b912a7c19dea3efe145bb2/commands/example_circularqueue_test.go)),
|
||||
is much smarter about data generation and is able to minimize failing test cases
|
||||
fully automatically, without any user code.
|
||||
|
||||
As for [testing/quick](https://pkg.go.dev/testing/quick), it lacks both
|
||||
convenient data generation facilities and any form of test case minimization, which
|
||||
are two main things to look for in a property-based testing library.
|
||||
|
||||
## FAQ
|
||||
|
||||
### What is property-based testing?
|
||||
|
||||
Suppose we've written arithmetic functions `add`, `subtract` and `multiply`
|
||||
and want to test them. Traditional testing approach is example-based —
|
||||
we come up with example inputs and outputs, and verify that the system behavior
|
||||
matches the examples:
|
||||
|
||||
```go
|
||||
func TestArithmetic_Example(t *testing.T) {
|
||||
t.Run("add", func(t *testing.T) {
|
||||
examples := [][3]int{
|
||||
{0, 0, 0},
|
||||
{0, 1, 1},
|
||||
{2, 2, 4},
|
||||
// ...
|
||||
}
|
||||
for _, e := range examples {
|
||||
if add(e[0], e[1]) != e[2] {
|
||||
t.Fatalf("add(%v, %v) != %v", e[0], e[1], e[2])
|
||||
}
|
||||
}
|
||||
})
|
||||
t.Run("subtract", func(t *testing.T) { /* ... */ })
|
||||
t.Run("multiply", func(t *testing.T) { /* ... */ })
|
||||
}
|
||||
```
|
||||
|
||||
In comparison, with property-based testing we define higher-level properties
|
||||
that should hold for arbitrary input. Each time we run a property-based test,
|
||||
properties are checked on a new set of pseudo-random data:
|
||||
|
||||
```go
|
||||
func TestArithmetic_Property(t *testing.T) {
|
||||
rapid.Check(t, func(t *rapid.T) {
|
||||
var (
|
||||
a = rapid.Int().Draw(t, "a")
|
||||
b = rapid.Int().Draw(t, "b")
|
||||
c = rapid.Int().Draw(t, "c")
|
||||
)
|
||||
if add(a, 0) != a {
|
||||
t.Fatalf("add() does not have 0 as identity")
|
||||
}
|
||||
if add(a, b) != add(b, a) {
|
||||
t.Fatalf("add() is not commutative")
|
||||
}
|
||||
if add(a, add(b, c)) != add(add(a, b), c) {
|
||||
t.Fatalf("add() is not associative")
|
||||
}
|
||||
if multiply(a, add(b, c)) != add(multiply(a, b), multiply(a, c)) {
|
||||
t.Fatalf("multiply() is not distributive over add()")
|
||||
}
|
||||
// ...
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
Property-based tests are more powerful and concise than example-based ones —
|
||||
and are also much more fun to write. As an additional benefit, coming up with
|
||||
general properties of the system often improves the design of the system itself.
|
||||
|
||||
### What properties should I test?
|
||||
|
||||
As you've seen from the examples above, it depends on the system you are testing.
|
||||
Usually a good place to start is to put yourself in the shoes of your user
|
||||
and ask what are the properties the user will rely on (often unknowingly or
|
||||
implicitly) when building on top of your system. That said, here are some
|
||||
broadly applicable and often encountered properties to keep in mind:
|
||||
|
||||
- function does not panic on valid input data
|
||||
- behavior of two algorithms or data structures is identical
|
||||
- all variants of the `decode(encode(x)) == x` roundtrip
|
||||
|
||||
### How does rapid work?
|
||||
|
||||
At its core, rapid does a fairly simple thing: generates pseudo-random data
|
||||
based on the specification you provide, and check properties that you define
|
||||
on the generated data.
|
||||
|
||||
Checking is easy: you simply write `if` statements and call something like
|
||||
`t.Fatalf` when things look wrong.
|
||||
|
||||
Generating is a bit more involved. When you construct a `Generator`, nothing
|
||||
happens: `Generator` is just a specification of how to `Draw` the data you
|
||||
want. When you call `Draw`, rapid will take some bytes from its internal
|
||||
random bitstream, use them to construct the value based on the `Generator`
|
||||
specification, and track how the random bytes used correspond to the value
|
||||
(and its subparts). This knowledge about the structure of the values being
|
||||
generated, as well as their relationship with the parts of the bitstream
|
||||
allows rapid to intelligently and automatically minify any failure found.
|
||||
|
||||
### What about fuzzing?
|
||||
|
||||
Property-based testing focuses on quick feedback loop: checking the properties
|
||||
on a small but diverse set of pseudo-random inputs in a fractions of a second.
|
||||
|
||||
In comparison, fuzzing focuses on slow, often multi-day, brute force input
|
||||
generation that maximizes the coverage.
|
||||
|
||||
Both approaches are useful. Property-based tests are used alongside regular
|
||||
example-based tests during development, and fuzzing is used to search for edge
|
||||
cases and security vulnerabilities. With
|
||||
[`MakeFuzz`](https://pkg.go.dev/pgregory.net/rapid#MakeFuzz), any rapid test
|
||||
can be used as a fuzz target.
|
||||
|
||||
## Usage
|
||||
|
||||
Just run `go test` as usual, it will pick up also all `rapid` tests.
|
||||
|
||||
There are a number of optional flags to influence rapid behavior, run
|
||||
`go test -args -h` and look at the flags with the `-rapid.` prefix. You can
|
||||
then pass such flags as usual. For example:
|
||||
|
||||
```sh
|
||||
go test -rapid.checks=10_000
|
||||
```
|
||||
|
||||
## Status
|
||||
|
||||
Rapid is stable: tests using rapid should continue to work with all future
|
||||
rapid releases with the same major version. Possible exceptions to this rule
|
||||
are API changes that replace the concrete type of parameter with an interface
|
||||
type, or other similar mostly non-breaking changes.
|
||||
|
||||
## License
|
||||
|
||||
Rapid is licensed under the [Mozilla Public License Version 2.0](./LICENSE).
|
||||
|
||||
[godev-img]: https://pkg.go.dev/badge/pgregory.net/rapid
|
||||
[godev]: https://pkg.go.dev/pgregory.net/rapid
|
||||
[ci-img]: https://github.com/flyingmutant/rapid/workflows/CI/badge.svg
|
||||
[ci]: https://github.com/flyingmutant/rapid/actions
|
||||
vendor/pgregory.net/rapid/TODO.md (generated, vendored, new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
# TODO
|
||||
|
||||
## Generators
|
||||
|
||||
- times, durations, locations
|
||||
- complex numbers
|
||||
- big numbers
|
||||
- ip addresses & masks
|
||||
- subset-of-slice
|
||||
- runes with rune/range blacklist
|
||||
- recursive (base + extend)
|
||||
|
||||
## Shrinking
|
||||
|
||||
- make it OK to pivot to a different error when shrinking
|
||||
- right now, we require for traceback to remain the same to continue shrinking, which is probably limiting
|
||||
- floats: maybe shrink towards lower *biased* exponent?
|
||||
- just like we have lower+delete pass to deal with situations like generation/sampling, we need to have a pass for choice
|
||||
- idea: lower (the "choice" block) + fill some region with random data
|
||||
- to try to reproduce with a simpler choice
|
||||
- this should work both OneOf and floats (where exponent is kind of a OneOf key)
|
||||
- questions:
|
||||
- how to deal with misalignment?
|
||||
- how to determine the group to randomize?
|
||||
- e.g. right now for floats it is not an explicit group but rather a bunch of nearby blocks
|
||||
- use fewer bits for genFloat01 to make shrinking a bit faster
|
||||
- shrink duplicates together
|
||||
- generalize to arbitrary "offsets" for pairs
|
||||
- better caching
|
||||
- detect when we are generating already generated values and abort early
|
||||
- not all value groups are standalone!
|
||||
- standalone might be too coarse, maybe should be replaced with a bunch of other traits
|
||||
- we are doing too much prop evaluations
|
||||
- partial sort does not swap e.g. int and int32
|
||||
- when shrinking, if we try to lower the wanted bits of some uint64, we have a high chance to draw very low value
|
||||
- because high bits will be masked out
|
||||
- this can prevent shrinking, when we first lower block A (which e.g. selects the generator), then
|
||||
we draw next block B (which the lowered generator wants fewer bits of). Instead of getting a bit value for B
|
||||
and doing proper search, we end up getting a small one, and abandoning the generator shrink
|
||||
- for order-based passes, try alternating orders?
|
||||
- what order is a better default?
|
||||
- "prefix search" shrinking
|
||||
- when shrinking, why do we leave the tail the same?
|
||||
- we have "misalignment" problems and all that
|
||||
- generate random data instead!
|
||||
- generate random tails all the time
|
||||
- minimize bitstream mis-alignment during shrinking (try to make the shape as constant as possible)
|
||||
- better, make minimization not care about mis-alignment
|
||||
- sticky bitstream?
|
||||
- differentiate groups with structure vs groups without one for smarter shrinking
|
||||
- non-greedy shrink
|
||||
- allow to increase the data size *between shrink passes*, if the net result is good
|
||||
- e.g. allow sort to do arbitrary? swaps
|
||||
- rejection sampling during shrinking leads to data misalignment, is this a problem?
|
||||
- can we detect overruns early and re-roll only the last part of the bitstream?
|
||||
- maybe overwrite bitstream instead of prune?
|
||||
- to never have an un-pruned version
|
||||
- to guarantee? that we can draw values successfully while shrinking (working with bufBitStream)
|
||||
|
||||
## Misc
|
||||
|
||||
- ability to run tests without shrinking (e.g. for running non-deterministic tests)
|
||||
- bitStream -> blockStream?
|
||||
- do not play with filter games for the state machine, just find all valid actions
|
||||
- when generating numbers in range, try to bias based on the min number,
|
||||
just like we bias repeat based on the min number?
|
||||
- because min number defines the "magnitude" of the whole thing, kind of?
|
||||
- so when we are generating numbers in [1000000; +inf) we do not stick with 1000000 too hard
|
||||
- more powerful assume/filter (look at what hypothesis is doing)
|
||||
- incorporate special case checking (bounds esp.)
|
||||
|
||||
## Wild ideas
|
||||
|
||||
- global path-based generation (kind of like simplex method), which makes most of the generators hit corner cases simultaneously
|
||||
- recurrence-based generation, because it is hard to stumble upon interesting stuff purely by random
|
||||
- start generating already generated stuff, overriding random for some number of draws
|
||||
- zip the sequence with itself
|
||||
- random jumps of rng, back/forward
|
||||
- recurrence-based generation may actually be better than usual fuzzing!
|
||||
- because we operate on 64 bits at once, which in most cases correspond to "full value",
|
||||
we have e.g. a much better chance to reproduce a multi-byte sequence (exact or slightly altered) somewhere else
|
||||
- this is kind-of related to go-fuzz versifier in some way
|
||||
- we also can (and should) reuse whole chunks which can correspond to strings/lists/etc.
|
||||
- random markov chain which switches states like
|
||||
- generate new data
|
||||
- reuse existing data starting from
|
||||
- reuse existing data altering it like X
|
||||
- should transition probabilities be universal or depend on generators?
|
||||
- should they also determine where to jump to, so that we jump to "compatible" stuff only?
|
||||
- can tag words with compatibility classes
|
||||
- can just jump to previous starts of the generated blocks?
|
||||
- can explore/exploit trade-off help us decide when to generate random data, and when to reuse existing?
|
||||
- probably can do thompson sampling when we have online coverage information
|
||||
- arbiter-based distributed system tester
|
||||
vendor/pgregory.net/rapid/collections.go (generated, vendored, new file, 194 lines)
@@ -0,0 +1,194 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import "fmt"
|
||||
|
||||
// ID returns its argument as is. ID is a helper for use with [SliceOfDistinct] and similar functions.
|
||||
func ID[V any](v V) V {
|
||||
return v
|
||||
}
|
||||
|
||||
// SliceOf is a shorthand for [SliceOfN](elem, -1, -1).
|
||||
func SliceOf[E any](elem *Generator[E]) *Generator[[]E] {
|
||||
return SliceOfN(elem, -1, -1)
|
||||
}
|
||||
|
||||
// SliceOfN creates a []E generator. If minLen >= 0, generated slices have minimum length of minLen.
|
||||
// If maxLen >= 0, generated slices have maximum length of maxLen. SliceOfN panics if maxLen >= 0
|
||||
// and minLen > maxLen.
|
||||
func SliceOfN[E any](elem *Generator[E], minLen int, maxLen int) *Generator[[]E] {
|
||||
assertValidRange(minLen, maxLen)
|
||||
|
||||
return newGenerator[[]E](&sliceGen[E, struct{}]{
|
||||
minLen: minLen,
|
||||
maxLen: maxLen,
|
||||
elem: elem,
|
||||
})
|
||||
}
|
||||
|
||||
// SliceOfDistinct is a shorthand for [SliceOfNDistinct](elem, -1, -1, keyFn).
|
||||
func SliceOfDistinct[E any, K comparable](elem *Generator[E], keyFn func(E) K) *Generator[[]E] {
|
||||
return SliceOfNDistinct(elem, -1, -1, keyFn)
|
||||
}
|
||||
|
||||
// SliceOfNDistinct creates a []E generator. Elements of each generated slice are distinct according to keyFn.
|
||||
// If minLen >= 0, generated slices have minimum length of minLen. If maxLen >= 0, generated slices
|
||||
// have maximum length of maxLen. SliceOfNDistinct panics if maxLen >= 0 and minLen > maxLen.
|
||||
// [ID] helper can be used as keyFn to generate slices of distinct comparable elements.
|
||||
func SliceOfNDistinct[E any, K comparable](elem *Generator[E], minLen int, maxLen int, keyFn func(E) K) *Generator[[]E] {
|
||||
assertValidRange(minLen, maxLen)
|
||||
|
||||
return newGenerator[[]E](&sliceGen[E, K]{
|
||||
minLen: minLen,
|
||||
maxLen: maxLen,
|
||||
elem: elem,
|
||||
keyFn: keyFn,
|
||||
})
|
||||
}
|
||||
|
||||
type sliceGen[E any, K comparable] struct {
|
||||
minLen int
|
||||
maxLen int
|
||||
elem *Generator[E]
|
||||
keyFn func(E) K
|
||||
}
|
||||
|
||||
func (g *sliceGen[E, K]) String() string {
|
||||
if g.keyFn == nil {
|
||||
if g.minLen < 0 && g.maxLen < 0 {
|
||||
return fmt.Sprintf("SliceOf(%v)", g.elem)
|
||||
} else {
|
||||
return fmt.Sprintf("SliceOfN(%v, minLen=%v, maxLen=%v)", g.elem, g.minLen, g.maxLen)
|
||||
}
|
||||
} else {
|
||||
if g.minLen < 0 && g.maxLen < 0 {
|
||||
return fmt.Sprintf("SliceOfDistinct(%v, key=%T)", g.elem, g.keyFn)
|
||||
} else {
|
||||
return fmt.Sprintf("SliceOfNDistinct(%v, minLen=%v, maxLen=%v, key=%T)", g.elem, g.minLen, g.maxLen, g.keyFn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *sliceGen[E, K]) value(t *T) []E {
|
||||
repeat := newRepeat(g.minLen, g.maxLen, -1, g.elem.String())
|
||||
|
||||
var seen map[K]struct{}
|
||||
if g.keyFn != nil {
|
||||
seen = make(map[K]struct{}, repeat.avg())
|
||||
}
|
||||
|
||||
sl := make([]E, 0, repeat.avg())
|
||||
for repeat.more(t.s) {
|
||||
e := g.elem.value(t)
|
||||
if g.keyFn == nil {
|
||||
sl = append(sl, e)
|
||||
} else {
|
||||
k := g.keyFn(e)
|
||||
if _, ok := seen[k]; ok {
|
||||
repeat.reject()
|
||||
} else {
|
||||
seen[k] = struct{}{}
|
||||
sl = append(sl, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sl
|
||||
}
|
||||
|
||||
// MapOf is a shorthand for [MapOfN](key, val, -1, -1).
|
||||
func MapOf[K comparable, V any](key *Generator[K], val *Generator[V]) *Generator[map[K]V] {
|
||||
return MapOfN(key, val, -1, -1)
|
||||
}
|
||||
|
||||
// MapOfN creates a map[K]V generator. If minLen >= 0, generated maps have minimum length of minLen.
|
||||
// If maxLen >= 0, generated maps have maximum length of maxLen. MapOfN panics if maxLen >= 0
|
||||
// and minLen > maxLen.
|
||||
func MapOfN[K comparable, V any](key *Generator[K], val *Generator[V], minLen int, maxLen int) *Generator[map[K]V] {
|
||||
assertValidRange(minLen, maxLen)
|
||||
|
||||
return newGenerator[map[K]V](&mapGen[K, V]{
|
||||
minLen: minLen,
|
||||
maxLen: maxLen,
|
||||
key: key,
|
||||
val: val,
|
||||
})
|
||||
}
|
||||
|
||||
// MapOfValues is a shorthand for [MapOfNValues](val, -1, -1, keyFn).
|
||||
func MapOfValues[K comparable, V any](val *Generator[V], keyFn func(V) K) *Generator[map[K]V] {
|
||||
return MapOfNValues(val, -1, -1, keyFn)
|
||||
}
|
||||
|
||||
// MapOfNValues creates a map[K]V generator, where keys are generated by applying keyFn to values.
|
||||
// If minLen >= 0, generated maps have minimum length of minLen. If maxLen >= 0, generated maps
|
||||
// have maximum length of maxLen. MapOfNValues panics if maxLen >= 0 and minLen > maxLen.
|
||||
func MapOfNValues[K comparable, V any](val *Generator[V], minLen int, maxLen int, keyFn func(V) K) *Generator[map[K]V] {
|
||||
assertValidRange(minLen, maxLen)
|
||||
|
||||
return newGenerator[map[K]V](&mapGen[K, V]{
|
||||
minLen: minLen,
|
||||
maxLen: maxLen,
|
||||
val: val,
|
||||
keyFn: keyFn,
|
||||
})
|
||||
}
|
||||
|
||||
type mapGen[K comparable, V any] struct {
|
||||
minLen int
|
||||
maxLen int
|
||||
key *Generator[K]
|
||||
val *Generator[V]
|
||||
keyFn func(V) K
|
||||
}
|
||||
|
||||
func (g *mapGen[K, V]) String() string {
|
||||
if g.key != nil {
|
||||
if g.minLen < 0 && g.maxLen < 0 {
|
||||
return fmt.Sprintf("MapOf(%v, %v)", g.key, g.val)
|
||||
} else {
|
||||
return fmt.Sprintf("MapOfN(%v, %v, minLen=%v, maxLen=%v)", g.key, g.val, g.minLen, g.maxLen)
|
||||
}
|
||||
} else {
|
||||
if g.minLen < 0 && g.maxLen < 0 {
|
||||
return fmt.Sprintf("MapOfValues(%v, key=%T)", g.val, g.keyFn)
|
||||
} else {
|
||||
return fmt.Sprintf("MapOfNValues(%v, minLen=%v, maxLen=%v, key=%T)", g.val, g.minLen, g.maxLen, g.keyFn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *mapGen[K, V]) value(t *T) map[K]V {
|
||||
label := g.val.String()
|
||||
if g.key != nil {
|
||||
label = g.key.String() + "," + label
|
||||
}
|
||||
|
||||
repeat := newRepeat(g.minLen, g.maxLen, -1, label)
|
||||
|
||||
m := make(map[K]V, repeat.avg())
|
||||
for repeat.more(t.s) {
|
||||
var k K
|
||||
var v V
|
||||
if g.key != nil {
|
||||
k = g.key.value(t)
|
||||
v = g.val.value(t)
|
||||
} else {
|
||||
v = g.val.value(t)
|
||||
k = g.keyFn(v)
|
||||
}
|
||||
|
||||
if _, ok := m[k]; ok {
|
||||
repeat.reject()
|
||||
} else {
|
||||
m[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
vendor/pgregory.net/rapid/combinators.go (generated, vendored, new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const tryLabel = "try"
|
||||
|
||||
// Custom creates a generator which produces results of calling fn. In fn, values should be generated
// by calling other generators; it is invalid to return a value from fn without using any other generator.
// Custom is a primary way of creating user-defined generators.
func Custom[V any](fn func(*T) V) *Generator[V] {
    return newGenerator[V](&customGen[V]{
        fn: fn,
    })
}

type customGen[V any] struct {
|
||||
fn func(*T) V
|
||||
}
|
||||
|
||||
func (g *customGen[V]) String() string {
|
||||
var v V
|
||||
return fmt.Sprintf("Custom(%T)", v)
|
||||
}
|
||||
|
||||
func (g *customGen[V]) value(t *T) V {
|
||||
return find(g.maybeValue, t, small)
|
||||
}
|
||||
|
||||
func (g *customGen[V]) maybeValue(t *T) (V, bool) {
|
||||
t = newT(t.tb, t.s, flags.debug, nil)
|
||||
defer t.cleanup()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if _, ok := r.(invalidData); !ok {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return g.fn(t), true
|
||||
}
|
||||
|
||||
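Illustrative sketch, not part of the vendored source: a user-defined generator built with Custom, drawing every field from other generators as the doc comment requires. The point type and pointGen helper are hypothetical.

type point struct{ x, y int }

func pointGen() *rapid.Generator[point] {
    return rapid.Custom(func(t *rapid.T) point {
        // Every field comes from another generator; returning a constant
        // here would trip the "group did not use any data" assertion.
        return point{
            x: rapid.IntRange(-100, 100).Draw(t, "x"),
            y: rapid.IntRange(-100, 100).Draw(t, "y"),
        }
    })
}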
// Deferred creates a generator which defers calling fn until attempting to produce a value. This allows
|
||||
// to define recursive generators.
|
||||
func Deferred[V any](fn func() *Generator[V]) *Generator[V] {
|
||||
return newGenerator[V](&deferredGen[V]{
|
||||
fn: fn,
|
||||
})
|
||||
}
|
||||
|
||||
type deferredGen[V any] struct {
|
||||
g *Generator[V]
|
||||
fn func() *Generator[V]
|
||||
}
|
||||
|
||||
func (g *deferredGen[V]) String() string {
|
||||
var v V
|
||||
return fmt.Sprintf("Deferred(%T)", v)
|
||||
}
|
||||
|
||||
func (g *deferredGen[V]) value(t *T) V {
|
||||
if g.g == nil {
|
||||
g.g = g.fn()
|
||||
}
|
||||
return g.g.value(t)
|
||||
}
|
||||
|
||||
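Illustrative sketch, not part of the vendored source: Deferred defers construction of the inner generator, which is what makes recursive generators expressible. The tree type and treeGen helper are hypothetical, SliceOfN is assumed to have the (elem, minLen, maxLen) shape suggested by the package doc, and termination of the recursion is only probabilistic.

type tree struct {
    value int
    kids  []*tree
}

func treeGen() *rapid.Generator[*tree] {
    return rapid.Custom(func(t *rapid.T) *tree {
        return &tree{
            value: rapid.Int().Draw(t, "value"),
            // Deferred(treeGen) avoids evaluating treeGen eagerly,
            // so the definition can refer to itself.
            kids: rapid.SliceOfN(rapid.Deferred(treeGen), 0, 2).Draw(t, "kids"),
        }
    })
}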
func filter[V any](g *Generator[V], fn func(V) bool) *Generator[V] {
|
||||
return newGenerator[V](&filteredGen[V]{
|
||||
g: g,
|
||||
fn: fn,
|
||||
})
|
||||
}
|
||||
|
||||
type filteredGen[V any] struct {
|
||||
g *Generator[V]
|
||||
fn func(V) bool
|
||||
}
|
||||
|
||||
func (g *filteredGen[V]) String() string {
|
||||
return fmt.Sprintf("%v.Filter(...)", g.g)
|
||||
}
|
||||
|
||||
func (g *filteredGen[V]) value(t *T) V {
|
||||
return find(g.maybeValue, t, small)
|
||||
}
|
||||
|
||||
func (g *filteredGen[V]) maybeValue(t *T) (V, bool) {
|
||||
v := g.g.value(t)
|
||||
if g.fn(v) {
|
||||
return v, true
|
||||
} else {
|
||||
var zero V
|
||||
return zero, false
|
||||
}
|
||||
}
|
||||
|
||||
func find[V any](gen func(*T) (V, bool), t *T, tries int) V {
|
||||
for n := 0; n < tries; n++ {
|
||||
i := t.s.beginGroup(tryLabel, false)
|
||||
v, ok := gen(t)
|
||||
t.s.endGroup(i, !ok)
|
||||
if ok {
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
panic(invalidData(fmt.Sprintf("failed to find suitable value in %d tries", tries)))
|
||||
}
|
||||
|
||||
// Map creates a generator producing fn(u) for each u produced by g.
|
||||
func Map[U any, V any](g *Generator[U], fn func(U) V) *Generator[V] {
|
||||
return newGenerator[V](&mappedGen[U, V]{
|
||||
g: g,
|
||||
fn: fn,
|
||||
})
|
||||
}
|
||||
|
||||
type mappedGen[U any, V any] struct {
|
||||
g *Generator[U]
|
||||
fn func(U) V
|
||||
}
|
||||
|
||||
func (g *mappedGen[U, V]) String() string {
|
||||
return fmt.Sprintf("Map(%v, %T)", g.g, g.fn)
|
||||
}
|
||||
|
||||
func (g *mappedGen[U, V]) value(t *T) V {
|
||||
return g.fn(g.g.value(t))
|
||||
}
|
||||
|
||||
// Just creates a generator which always produces the given value.
|
||||
// Just(val) is a shorthand for [SampledFrom]([]V{val}).
|
||||
func Just[V any](val V) *Generator[V] {
|
||||
return SampledFrom([]V{val})
|
||||
}
|
||||
|
||||
// SampledFrom creates a generator which produces values from the given slice.
|
||||
// SampledFrom panics if slice is empty.
|
||||
func SampledFrom[S ~[]E, E any](slice S) *Generator[E] {
|
||||
assertf(len(slice) > 0, "slice should not be empty")
|
||||
|
||||
return newGenerator[E](&sampledGen[E]{
|
||||
slice: slice,
|
||||
})
|
||||
}
|
||||
|
||||
type sampledGen[E any] struct {
|
||||
slice []E
|
||||
}
|
||||
|
||||
func (g *sampledGen[E]) String() string {
|
||||
if len(g.slice) == 1 {
|
||||
return fmt.Sprintf("Just(%v)", g.slice[0])
|
||||
} else {
|
||||
return fmt.Sprintf("SampledFrom(%v %T)", len(g.slice), g.slice[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (g *sampledGen[E]) value(t *T) E {
|
||||
i := genIndex(t.s, len(g.slice), true)
|
||||
|
||||
return g.slice[i]
|
||||
}
|
||||
|
||||
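Illustrative sketch, not part of the vendored source: SampledFrom and Just inside a property function (propSampled is a hypothetical name, meant to be passed to Check).

func propSampled(t *rapid.T) {
    method := rapid.SampledFrom([]string{"GET", "PUT", "POST", "DELETE"}).Draw(t, "method")
    answer := rapid.Just(42).Draw(t, "answer") // always 42; shorthand for SampledFrom([]int{42})
    if answer != 42 {
        t.Fatalf("unexpected draw %v for %q", answer, method)
    }
}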
// Permutation creates a generator which produces permutations of the given slice.
|
||||
func Permutation[S ~[]E, E any](slice S) *Generator[S] {
|
||||
return newGenerator[S](&permGen[S, E]{
|
||||
slice: slice,
|
||||
})
|
||||
}
|
||||
|
||||
type permGen[S ~[]E, E any] struct {
|
||||
slice S
|
||||
}
|
||||
|
||||
func (g *permGen[S, E]) String() string {
|
||||
var zero E
|
||||
return fmt.Sprintf("Permutation(%v %T)", len(g.slice), zero)
|
||||
}
|
||||
|
||||
func (g *permGen[S, E]) value(t *T) S {
|
||||
s := append(S(nil), g.slice...)
|
||||
n := len(s)
|
||||
m := n - 1
|
||||
if m < 0 {
|
||||
m = 0
|
||||
}
|
||||
|
||||
// shrink-friendly variant of Fisher–Yates shuffle: shrinks to lower number of smaller distance swaps
|
||||
repeat := newRepeat(0, m, math.MaxInt, "permute")
|
||||
for i := 0; repeat.more(t.s); i++ {
|
||||
j, _, _ := genUintRange(t.s, uint64(i), uint64(n-1), false)
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// OneOf creates a generator which produces each value by selecting one of gens and producing a value from it.
|
||||
// OneOf panics if gens is empty.
|
||||
func OneOf[V any](gens ...*Generator[V]) *Generator[V] {
|
||||
assertf(len(gens) > 0, "at least one generator should be specified")
|
||||
|
||||
return newGenerator[V](&oneOfGen[V]{
|
||||
gens: gens,
|
||||
})
|
||||
}
|
||||
|
||||
type oneOfGen[V any] struct {
|
||||
gens []*Generator[V]
|
||||
}
|
||||
|
||||
func (g *oneOfGen[V]) String() string {
|
||||
strs := make([]string, len(g.gens))
|
||||
for i, g := range g.gens {
|
||||
strs[i] = g.String()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("OneOf(%v)", strings.Join(strs, ", "))
|
||||
}
|
||||
|
||||
func (g *oneOfGen[V]) value(t *T) V {
|
||||
i := genIndex(t.s, len(g.gens), true)
|
||||
|
||||
return g.gens[i].value(t)
|
||||
}
|
||||
|
||||
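Illustrative sketch, not part of the vendored source: OneOf is handy for mixing hand-picked edge cases with the general case. The helper name is hypothetical.

func smallOrHugeGen() *rapid.Generator[int] {
    return rapid.OneOf(
        rapid.IntRange(0, 9), // mostly small values
        rapid.Just(0),        // explicit edge case
        rapid.IntMin(1<<30),  // occasionally something huge
    )
}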
// Ptr creates a *E generator. If allowNil is true, Ptr can return nil pointers.
|
||||
func Ptr[E any](elem *Generator[E], allowNil bool) *Generator[*E] {
|
||||
return newGenerator[*E](&ptrGen[E]{
|
||||
elem: elem,
|
||||
allowNil: allowNil,
|
||||
})
|
||||
}
|
||||
|
||||
type ptrGen[E any] struct {
|
||||
elem *Generator[E]
|
||||
allowNil bool
|
||||
}
|
||||
|
||||
func (g *ptrGen[E]) String() string {
|
||||
return fmt.Sprintf("Ptr(%v, allowNil=%v)", g.elem, g.allowNil)
|
||||
}
|
||||
|
||||
func (g *ptrGen[E]) value(t *T) *E {
|
||||
pNonNil := float64(1)
|
||||
if g.allowNil {
|
||||
pNonNil = 0.5
|
||||
}
|
||||
|
||||
if flipBiasedCoin(t.s, pNonNil) {
|
||||
e := g.elem.value(t)
|
||||
return &e
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
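Illustrative sketch, not part of the vendored source: an optional value modeled with Ptr (hypothetical property function name).

func propPtr(t *rapid.T) {
    timeout := rapid.Ptr(rapid.IntRange(1, 60), true).Draw(t, "timeout") // may be nil
    if timeout != nil && (*timeout < 1 || *timeout > 60) {
        t.Fatalf("timeout %d out of range", *timeout)
    }
}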
func asAny[V any](g *Generator[V]) *Generator[any] {
|
||||
return newGenerator[any](&asAnyGen[V]{
|
||||
gen: g,
|
||||
})
|
||||
}
|
||||
|
||||
type asAnyGen[V any] struct {
|
||||
gen *Generator[V]
|
||||
}
|
||||
|
||||
func (g *asAnyGen[V]) String() string {
|
||||
return fmt.Sprintf("%v.AsAny()", g.gen)
|
||||
}
|
||||
|
||||
func (g *asAnyGen[V]) value(t *T) any {
|
||||
return g.gen.value(t)
|
||||
}
|
||||
202
vendor/pgregory.net/rapid/data.go
generated
vendored
Normal file
@@ -0,0 +1,202 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"hash/maphash"
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
type bitStream interface {
|
||||
drawBits(n int) uint64
|
||||
beginGroup(label string, standalone bool) int
|
||||
endGroup(i int, discard bool)
|
||||
}
|
||||
|
||||
func baseSeed() uint64 {
|
||||
if flags.seed != 0 {
|
||||
return flags.seed
|
||||
}
|
||||
|
||||
return new(maphash.Hash).Sum64()
|
||||
}
|
||||
|
||||
type randomBitStream struct {
|
||||
ctx jsf64ctx
|
||||
recordedBits
|
||||
}
|
||||
|
||||
func newRandomBitStream(seed uint64, persist bool) *randomBitStream {
|
||||
s := &randomBitStream{}
|
||||
s.init(seed)
|
||||
s.persist = persist
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *randomBitStream) init(seed uint64) {
|
||||
s.ctx.init(seed)
|
||||
}
|
||||
|
||||
func (s *randomBitStream) drawBits(n int) uint64 {
|
||||
assert(n >= 0)
|
||||
|
||||
var u uint64
|
||||
if n <= 64 {
|
||||
u = s.ctx.rand() & bitmask64(uint(n))
|
||||
} else {
|
||||
u = math.MaxUint64
|
||||
}
|
||||
s.record(u)
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
type bufBitStream struct {
|
||||
buf []uint64
|
||||
recordedBits
|
||||
}
|
||||
|
||||
func newBufBitStream(buf []uint64, persist bool) *bufBitStream {
|
||||
s := &bufBitStream{
|
||||
buf: buf,
|
||||
}
|
||||
s.persist = persist
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *bufBitStream) drawBits(n int) uint64 {
|
||||
assert(n >= 0)
|
||||
|
||||
if len(s.buf) == 0 {
|
||||
panic(invalidData("overrun"))
|
||||
}
|
||||
|
||||
u := s.buf[0] & bitmask64(uint(n))
|
||||
s.record(u)
|
||||
s.buf = s.buf[1:]
|
||||
|
||||
return u
|
||||
}
|
||||
|
||||
type groupInfo struct {
|
||||
begin int
|
||||
end int
|
||||
label string
|
||||
standalone bool
|
||||
discard bool
|
||||
}
|
||||
|
||||
type recordedBits struct {
|
||||
data []uint64
|
||||
groups []groupInfo
|
||||
dataLen int
|
||||
persist bool
|
||||
}
|
||||
|
||||
func (rec *recordedBits) record(u uint64) {
|
||||
if rec.persist {
|
||||
rec.data = append(rec.data, u)
|
||||
} else {
|
||||
rec.dataLen++
|
||||
}
|
||||
}
|
||||
|
||||
func (rec *recordedBits) beginGroup(label string, standalone bool) int {
|
||||
if !rec.persist {
|
||||
return rec.dataLen
|
||||
}
|
||||
|
||||
rec.groups = append(rec.groups, groupInfo{
|
||||
begin: len(rec.data),
|
||||
end: -1,
|
||||
label: label,
|
||||
standalone: standalone,
|
||||
})
|
||||
|
||||
return len(rec.groups) - 1
|
||||
}
|
||||
|
||||
func (rec *recordedBits) endGroup(i int, discard bool) {
|
||||
assertf(discard || (!rec.persist && rec.dataLen > i) || (rec.persist && len(rec.data) > rec.groups[i].begin),
|
||||
"group did not use any data from bitstream; this is likely a result of Custom generator not calling any of the built-in generators")
|
||||
|
||||
if !rec.persist {
|
||||
return
|
||||
}
|
||||
|
||||
rec.groups[i].end = len(rec.data)
|
||||
rec.groups[i].discard = discard
|
||||
}
|
||||
|
||||
func (rec *recordedBits) prune() {
|
||||
assert(rec.persist)
|
||||
|
||||
for i := 0; i < len(rec.groups); {
|
||||
if rec.groups[i].discard {
|
||||
rec.removeGroup(i) // O(n^2)
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
for _, g := range rec.groups {
|
||||
assert(g.begin != g.end)
|
||||
}
|
||||
}
|
||||
|
||||
func (rec *recordedBits) removeGroup(i int) {
|
||||
g := rec.groups[i]
|
||||
assert(g.end >= 0)
|
||||
|
||||
j := i + 1
|
||||
for j < len(rec.groups) && rec.groups[j].end <= g.end {
|
||||
j++
|
||||
}
|
||||
|
||||
rec.data = append(rec.data[:g.begin], rec.data[g.end:]...)
|
||||
rec.groups = append(rec.groups[:i], rec.groups[j:]...)
|
||||
|
||||
n := g.end - g.begin
|
||||
for j := range rec.groups {
|
||||
if rec.groups[j].begin >= g.end {
|
||||
rec.groups[j].begin -= n
|
||||
}
|
||||
if rec.groups[j].end >= g.end {
|
||||
rec.groups[j].end -= n
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// "A Small Noncryptographic PRNG" by Bob Jenkins
|
||||
// See http://www.pcg-random.org/posts/bob-jenkins-small-prng-passes-practrand.html for some recent analysis.
|
||||
type jsf64ctx struct {
|
||||
a uint64
|
||||
b uint64
|
||||
c uint64
|
||||
d uint64
|
||||
}
|
||||
|
||||
func (x *jsf64ctx) init(seed uint64) {
|
||||
x.a = 0xf1ea5eed
|
||||
x.b = seed
|
||||
x.c = seed
|
||||
x.d = seed
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
x.rand()
|
||||
}
|
||||
}
|
||||
|
||||
func (x *jsf64ctx) rand() uint64 {
|
||||
e := x.a - bits.RotateLeft64(x.b, 7)
|
||||
x.a = x.b ^ bits.RotateLeft64(x.c, 13)
|
||||
x.b = x.c + bits.RotateLeft64(x.d, 37)
|
||||
x.c = x.d + e
|
||||
x.d = e + x.a
|
||||
return x.d
|
||||
}
|
||||
57
vendor/pgregory.net/rapid/doc.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

/*
Package rapid implements utilities for property-based testing.

[Check] verifies that properties you define hold for a large number
of automatically generated test cases. If a failure is found, rapid
fails the current test and presents an automatically minimized
version of the failing test case.

[T.Repeat] is used to construct state machine (sometimes called "stateful"
or "model-based") tests.

# Generators

Primitives:
- [Bool]
- [Rune], [RuneFrom]
- [Byte], [ByteMin], [ByteMax], [ByteRange]
- [Int], [IntMin], [IntMax], [IntRange]
- [Int8], [Int8Min], [Int8Max], [Int8Range]
- [Int16], [Int16Min], [Int16Max], [Int16Range]
- [Int32], [Int32Min], [Int32Max], [Int32Range]
- [Int64], [Int64Min], [Int64Max], [Int64Range]
- [Uint], [UintMin], [UintMax], [UintRange]
- [Uint8], [Uint8Min], [Uint8Max], [Uint8Range]
- [Uint16], [Uint16Min], [Uint16Max], [Uint16Range]
- [Uint32], [Uint32Min], [Uint32Max], [Uint32Range]
- [Uint64], [Uint64Min], [Uint64Max], [Uint64Range]
- [Uintptr], [UintptrMin], [UintptrMax], [UintptrRange]
- [Float32], [Float32Min], [Float32Max], [Float32Range]
- [Float64], [Float64Min], [Float64Max], [Float64Range]

Collections:
- [String], [StringMatching], [StringOf], [StringOfN], [StringN]
- [SliceOfBytesMatching]
- [SliceOf], [SliceOfN], [SliceOfDistinct], [SliceOfNDistinct]
- [Permutation]
- [MapOf], [MapOfN], [MapOfValues], [MapOfNValues]

User-defined types:
- [Custom]
- [Make]

Other:
- [Map],
- [Generator.Filter]
- [SampledFrom], [Just]
- [OneOf]
- [Deferred]
- [Ptr]
*/
package rapid
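Illustrative sketch, not part of the vendored package: a self-contained property test that ties the pieces listed above together with Check, SliceOf and Draw. Package name, import path and test name are placeholders.

package somepkg_test

import (
    "reflect"
    "sort"
    "testing"

    "pgregory.net/rapid"
)

func TestSortIsIdempotent(t *testing.T) {
    rapid.Check(t, func(t *rapid.T) {
        s := rapid.SliceOf(rapid.Int()).Draw(t, "s")

        sort.Ints(s)
        once := append([]int(nil), s...)
        sort.Ints(s)

        if !reflect.DeepEqual(once, s) {
            t.Fatalf("sorting twice gave a different result: %v vs %v", once, s)
        }
    })
}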
792
vendor/pgregory.net/rapid/engine.go
generated
vendored
Normal file
@@ -0,0 +1,792 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
small = 5
|
||||
invalidChecksMult = 10
|
||||
exampleMaxTries = 1000
|
||||
|
||||
maxTestTimeout = 24 * time.Hour
|
||||
shrinkStepBound = 10 * time.Second // can be improved by taking average checkOnce runtime into account
|
||||
|
||||
tracebackLen = 32
|
||||
tracebackStop = "pgregory.net/rapid.checkOnce"
|
||||
runtimePrefix = "runtime."
|
||||
)
|
||||
|
||||
var (
|
||||
flags cmdline
|
||||
|
||||
tracebackBlacklist = map[string]bool{
|
||||
"pgregory.net/rapid.(*customGen[...]).maybeValue.func1": true,
|
||||
"pgregory.net/rapid.runAction.func1": true,
|
||||
}
|
||||
)
|
||||
|
||||
type cmdline struct {
|
||||
checks int
|
||||
steps int
|
||||
failfile string
|
||||
nofailfile bool
|
||||
seed uint64
|
||||
log bool
|
||||
verbose bool
|
||||
debug bool
|
||||
debugvis bool
|
||||
shrinkTime time.Duration
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.IntVar(&flags.checks, "rapid.checks", 100, "rapid: number of checks to perform")
|
||||
flag.IntVar(&flags.steps, "rapid.steps", 30, "rapid: average number of Repeat actions to execute")
|
||||
flag.StringVar(&flags.failfile, "rapid.failfile", "", "rapid: fail file to use to reproduce test failure")
|
||||
flag.BoolVar(&flags.nofailfile, "rapid.nofailfile", false, "rapid: do not write fail files on test failures")
|
||||
flag.Uint64Var(&flags.seed, "rapid.seed", 0, "rapid: PRNG seed to start with (0 to use a random one)")
|
||||
flag.BoolVar(&flags.log, "rapid.log", false, "rapid: eager verbose output to stdout (to aid with unrecoverable test failures)")
|
||||
flag.BoolVar(&flags.verbose, "rapid.v", false, "rapid: verbose output")
|
||||
flag.BoolVar(&flags.debug, "rapid.debug", false, "rapid: debugging output")
|
||||
flag.BoolVar(&flags.debugvis, "rapid.debugvis", false, "rapid: debugging visualization")
|
||||
flag.DurationVar(&flags.shrinkTime, "rapid.shrinktime", 30*time.Second, "rapid: maximum time to spend on test case minimization")
|
||||
}
|
||||
|
||||
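Illustrative note, not part of the vendored source: with the flags registered above, typical invocations look like the following (the test name is a placeholder, and the fail file path is whatever the failure message prints).

go test -run TestFoo -rapid.checks=1000 -rapid.v
go test -run TestFoo -rapid.seed=12345
go test -run TestFoo -rapid.failfile="<path printed in the failure message>"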
func assert(ok bool) {
|
||||
if !ok {
|
||||
panic("assertion failed")
|
||||
}
|
||||
}
|
||||
|
||||
func assertf(ok bool, format string, args ...any) {
|
||||
if !ok {
|
||||
panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func assertValidRange(min int, max int) {
|
||||
if max >= 0 && min > max {
|
||||
panic(fmt.Sprintf("invalid range [%d, %d]", min, max))
|
||||
}
|
||||
}
|
||||
|
||||
func checkDeadline(tb tb) time.Time {
|
||||
t, ok := tb.(*testing.T)
|
||||
if !ok {
|
||||
return time.Now().Add(maxTestTimeout)
|
||||
}
|
||||
d, ok := t.Deadline()
|
||||
if !ok {
|
||||
return time.Now().Add(maxTestTimeout)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func shrinkDeadline(deadline time.Time) time.Time {
|
||||
d := time.Now().Add(flags.shrinkTime)
|
||||
max := deadline.Add(-shrinkStepBound) // account for the fact that shrink deadline is checked before the step
|
||||
if d.After(max) {
|
||||
d = max
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// Check fails the current test if rapid can find a test case which falsifies prop.
//
// Property is falsified in case of a panic or a call to
// [*T.Fatalf], [*T.Fatal], [*T.Errorf], [*T.Error], [*T.FailNow] or [*T.Fail].
func Check(t TB, prop func(*T)) {
    t.Helper()
    checkTB(t, checkDeadline(t), prop)
}

// MakeCheck is a convenience function for defining subtests suitable for
|
||||
// [*testing.T.Run]. It allows you to write this:
|
||||
//
|
||||
// t.Run("subtest name", rapid.MakeCheck(func(t *rapid.T) {
|
||||
// // test code
|
||||
// }))
|
||||
//
|
||||
// instead of this:
|
||||
//
|
||||
// t.Run("subtest name", func(t *testing.T) {
|
||||
// rapid.Check(t, func(t *rapid.T) {
|
||||
// // test code
|
||||
// })
|
||||
// })
|
||||
func MakeCheck(prop func(*T)) func(*testing.T) {
|
||||
return func(t *testing.T) {
|
||||
t.Helper()
|
||||
checkTB(t, checkDeadline(t), prop)
|
||||
}
|
||||
}
|
||||
|
||||
// MakeFuzz creates a fuzz target for [*testing.F.Fuzz]:
|
||||
//
|
||||
// func FuzzFoo(f *testing.F) {
|
||||
// f.Fuzz(rapid.MakeFuzz(func(t *rapid.T) {
|
||||
// // test code
|
||||
// }))
|
||||
// }
|
||||
func MakeFuzz(prop func(*T)) func(*testing.T, []byte) {
|
||||
return func(t *testing.T, input []byte) {
|
||||
t.Helper()
|
||||
checkFuzz(t, prop, input)
|
||||
}
|
||||
}
|
||||
|
||||
func checkFuzz(tb tb, prop func(*T), input []byte) {
|
||||
tb.Helper()
|
||||
|
||||
var buf []uint64
|
||||
for len(input) > 0 {
|
||||
var tmp [8]byte
|
||||
n := copy(tmp[:], input)
|
||||
buf = append(buf, binary.LittleEndian.Uint64(tmp[:]))
|
||||
input = input[n:]
|
||||
}
|
||||
|
||||
t := newT(tb, newBufBitStream(buf, false), true, nil)
|
||||
err := checkOnce(t, prop)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
// do nothing
|
||||
case err.isInvalidData():
|
||||
tb.SkipNow()
|
||||
case err.isStopTest():
|
||||
tb.Fatalf("[rapid] failed: %v", err)
|
||||
default:
|
||||
tb.Fatalf("[rapid] panic: %v\nTraceback:\n%v", err, traceback(err))
|
||||
}
|
||||
}
|
||||
|
||||
func checkTB(tb tb, deadline time.Time, prop func(*T)) {
|
||||
tb.Helper()
|
||||
|
||||
checks := flags.checks
|
||||
if testing.Short() {
|
||||
checks /= 5
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
valid, invalid, earlyExit, seed, failfile, buf, err1, err2 := doCheck(tb, deadline, checks, baseSeed(), flags.failfile, true, prop)
|
||||
dt := time.Since(start)
|
||||
|
||||
if err1 == nil && err2 == nil {
|
||||
if valid == checks || (earlyExit && valid > 0) {
|
||||
tb.Logf("[rapid] OK, passed %v tests (%v)", valid, dt)
|
||||
} else {
|
||||
tb.Errorf("[rapid] only generated %v valid tests from %v total (%v)", valid, valid+invalid, dt)
|
||||
}
|
||||
} else {
|
||||
if failfile == "" && !flags.nofailfile {
|
||||
_, failfile = failFileName(tb.Name())
|
||||
out := captureTestOutput(tb, prop, buf)
|
||||
err := saveFailFile(failfile, rapidVersion, out, seed, buf)
|
||||
if err != nil {
|
||||
tb.Logf("[rapid] %v", err)
|
||||
failfile = ""
|
||||
}
|
||||
}
|
||||
|
||||
var repr string
|
||||
switch {
|
||||
case failfile != "" && seed != 0:
|
||||
repr = fmt.Sprintf("-rapid.failfile=%q (or -rapid.seed=%d)", failfile, seed)
|
||||
case failfile != "":
|
||||
repr = fmt.Sprintf("-rapid.failfile=%q", failfile)
|
||||
case seed != 0:
|
||||
repr = fmt.Sprintf("-rapid.seed=%d", seed)
|
||||
}
|
||||
|
||||
name := regexp.QuoteMeta(tb.Name())
|
||||
if traceback(err1) == traceback(err2) {
|
||||
if err2.isStopTest() {
|
||||
tb.Errorf("[rapid] failed after %v tests: %v\nTo reproduce, specify -run=%q %v\nFailed test output:", valid, err2, name, repr)
|
||||
} else {
|
||||
tb.Errorf("[rapid] panic after %v tests: %v\nTo reproduce, specify -run=%q %v\nTraceback:\n%vFailed test output:", valid, err2, name, repr, traceback(err2))
|
||||
}
|
||||
} else {
|
||||
tb.Errorf("[rapid] flaky test, can not reproduce a failure\nTo try to reproduce, specify -run=%q %v\nTraceback (%v):\n%vOriginal traceback (%v):\n%vFailed test output:", name, repr, err2, traceback(err2), err1, traceback(err1))
|
||||
}
|
||||
|
||||
_ = checkOnce(newT(tb, newBufBitStream(buf, false), true, nil), prop) // output using (*testing.T).Log for proper line numbers
|
||||
}
|
||||
|
||||
if tb.Failed() {
|
||||
tb.FailNow() // do not try to run any checks after the first failed one
|
||||
}
|
||||
}
|
||||
|
||||
func doCheck(tb tb, deadline time.Time, checks int, seed uint64, failfile string, globFailFiles bool, prop func(*T)) (int, int, bool, uint64, string, []uint64, *testError, *testError) {
|
||||
tb.Helper()
|
||||
|
||||
assertf(!tb.Failed(), "check function called with *testing.T which has already failed")
|
||||
|
||||
var failfiles []string
|
||||
if failfile != "" {
|
||||
failfiles = []string{failfile}
|
||||
}
|
||||
if globFailFiles {
|
||||
matches, _ := filepath.Glob(failFilePattern(tb.Name()))
|
||||
failfiles = append(failfiles, matches...)
|
||||
}
|
||||
for _, failfile := range failfiles {
|
||||
buf, err1, err2 := checkFailFile(tb, failfile, prop)
|
||||
if err1 != nil || err2 != nil {
|
||||
return 0, 0, false, 0, failfile, buf, err1, err2
|
||||
}
|
||||
}
|
||||
|
||||
valid, invalid, earlyExit, seed, err1 := findBug(tb, deadline, checks, seed, prop)
|
||||
if err1 == nil {
|
||||
return valid, invalid, earlyExit, 0, "", nil, nil, nil
|
||||
}
|
||||
|
||||
s := newRandomBitStream(seed, true)
|
||||
t := newT(tb, s, flags.verbose, nil)
|
||||
t.Logf("[rapid] trying to reproduce the failure")
|
||||
err2 := checkOnce(t, prop)
|
||||
if !sameError(err1, err2) {
|
||||
return valid, invalid, false, seed, "", s.data, err1, err2
|
||||
}
|
||||
|
||||
t.Logf("[rapid] trying to minimize the failing test case")
|
||||
buf, err3 := shrink(tb, shrinkDeadline(deadline), s.recordedBits, err2, prop)
|
||||
|
||||
return valid, invalid, false, seed, "", buf, err2, err3
|
||||
}
|
||||
|
||||
func checkFailFile(tb tb, failfile string, prop func(*T)) ([]uint64, *testError, *testError) {
|
||||
tb.Helper()
|
||||
|
||||
version, _, buf, err := loadFailFile(failfile)
|
||||
if err != nil {
|
||||
tb.Logf("[rapid] ignoring fail file: %v", err)
|
||||
return nil, nil, nil
|
||||
}
|
||||
if version != rapidVersion {
|
||||
tb.Logf("[rapid] ignoring fail file: version %q differs from rapid version %q", version, rapidVersion)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
s1 := newBufBitStream(buf, false)
|
||||
t1 := newT(tb, s1, flags.verbose, nil)
|
||||
err1 := checkOnce(t1, prop)
|
||||
if err1 == nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
if err1.isInvalidData() {
|
||||
tb.Logf("[rapid] fail file %q is no longer valid", failfile)
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
s2 := newBufBitStream(buf, false)
|
||||
t2 := newT(tb, s2, flags.verbose, nil)
|
||||
t2.Logf("[rapid] trying to reproduce the failure")
|
||||
err2 := checkOnce(t2, prop)
|
||||
|
||||
return buf, err1, err2
|
||||
}
|
||||
|
||||
func findBug(tb tb, deadline time.Time, checks int, seed uint64, prop func(*T)) (int, int, bool, uint64, *testError) {
|
||||
tb.Helper()
|
||||
|
||||
var (
|
||||
r = newRandomBitStream(0, false)
|
||||
t = newT(tb, r, flags.verbose, nil)
|
||||
valid = 0
|
||||
invalid = 0
|
||||
)
|
||||
|
||||
var total time.Duration
|
||||
for valid < checks && invalid < checks*invalidChecksMult {
|
||||
iter := valid + invalid
|
||||
if iter > 0 && time.Until(deadline) < total/time.Duration(iter)*5 {
|
||||
if t.shouldLog() {
|
||||
t.Logf("[rapid] early exit after test #%v (%v)", iter, total)
|
||||
}
|
||||
return valid, invalid, true, 0, nil
|
||||
}
|
||||
|
||||
seed += uint64(iter)
|
||||
r.init(seed)
|
||||
start := time.Now()
|
||||
if t.shouldLog() {
|
||||
t.Logf("[rapid] test #%v start (seed %v)", iter+1, seed)
|
||||
}
|
||||
|
||||
err := checkOnce(t, prop)
|
||||
dt := time.Since(start)
|
||||
total += dt
|
||||
if err == nil {
|
||||
if t.shouldLog() {
|
||||
t.Logf("[rapid] test #%v OK (%v)", iter+1, dt)
|
||||
}
|
||||
valid++
|
||||
} else if err.isInvalidData() {
|
||||
if t.shouldLog() {
|
||||
t.Logf("[rapid] test #%v invalid (%v)", iter+1, dt)
|
||||
}
|
||||
invalid++
|
||||
} else {
|
||||
if t.shouldLog() {
|
||||
t.Logf("[rapid] test #%v failed: %v", iter+1, err)
|
||||
}
|
||||
return valid, invalid, false, seed, err
|
||||
}
|
||||
}
|
||||
|
||||
return valid, invalid, false, 0, nil
|
||||
}
|
||||
|
||||
func checkOnce(t *T, prop func(*T)) (err *testError) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
defer func() { err = panicToError(recover(), 3) }()
|
||||
|
||||
defer t.cleanup()
|
||||
prop(t)
|
||||
t.failOnError()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func captureTestOutput(tb tb, prop func(*T), buf []uint64) []byte {
|
||||
var b bytes.Buffer
|
||||
l := log.New(&b, fmt.Sprintf("[%v] ", tb.Name()), log.Lmsgprefix|log.Ldate|log.Ltime|log.Lmicroseconds)
|
||||
_ = checkOnce(newT(tb, newBufBitStream(buf, false), false, l), prop)
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
type invalidData string
|
||||
type stopTest string
|
||||
|
||||
type testError struct {
|
||||
data any
|
||||
traceback string
|
||||
}
|
||||
|
||||
func panicToError(p any, skip int) *testError {
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
callers := make([]uintptr, tracebackLen)
|
||||
callers = callers[:runtime.Callers(skip, callers)]
|
||||
frames := runtime.CallersFrames(callers)
|
||||
|
||||
b := &strings.Builder{}
|
||||
f, more, skipSpecial := runtime.Frame{}, true, true
|
||||
for more && !strings.HasSuffix(f.Function, tracebackStop) {
|
||||
f, more = frames.Next()
|
||||
|
||||
if skipSpecial && (tracebackBlacklist[f.Function] || strings.HasPrefix(f.Function, runtimePrefix)) {
|
||||
continue
|
||||
}
|
||||
skipSpecial = false
|
||||
|
||||
_, err := fmt.Fprintf(b, " %s:%d in %s\n", f.File, f.Line, f.Function)
|
||||
assert(err == nil)
|
||||
}
|
||||
|
||||
return &testError{
|
||||
data: p,
|
||||
traceback: b.String(),
|
||||
}
|
||||
}
|
||||
|
||||
func (err *testError) Error() string {
|
||||
if msg, ok := err.data.(stopTest); ok {
|
||||
return string(msg)
|
||||
}
|
||||
|
||||
if msg, ok := err.data.(invalidData); ok {
|
||||
return fmt.Sprintf("invalid data: %s", string(msg))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v", err.data)
|
||||
}
|
||||
|
||||
func (err *testError) isInvalidData() bool {
|
||||
_, ok := err.data.(invalidData)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err *testError) isStopTest() bool {
|
||||
_, ok := err.data.(stopTest)
|
||||
return ok
|
||||
}
|
||||
|
||||
func sameError(err1 *testError, err2 *testError) bool {
|
||||
return errorString(err1) == errorString(err2) && traceback(err1) == traceback(err2)
|
||||
}
|
||||
|
||||
func errorString(err *testError) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
func traceback(err *testError) string {
|
||||
if err == nil {
|
||||
return " <no error>\n"
|
||||
}
|
||||
|
||||
return err.traceback
|
||||
}
|
||||
|
||||
// TB is a common interface between [*testing.T], [*testing.B] and [*T].
|
||||
type TB interface {
|
||||
Helper()
|
||||
Name() string
|
||||
Logf(format string, args ...any)
|
||||
Log(args ...any)
|
||||
Skipf(format string, args ...any)
|
||||
Skip(args ...any)
|
||||
SkipNow()
|
||||
Errorf(format string, args ...any)
|
||||
Error(args ...any)
|
||||
Fatalf(format string, args ...any)
|
||||
Fatal(args ...any)
|
||||
FailNow()
|
||||
Fail()
|
||||
Failed() bool
|
||||
}
|
||||
|
||||
type tb TB // tb is a private copy of TB, made to avoid T having public fields
|
||||
|
||||
type nilTB struct{}
|
||||
|
||||
func (nilTB) Helper() {}
|
||||
func (nilTB) Name() string { return "" }
|
||||
func (nilTB) Logf(string, ...any) {}
|
||||
func (nilTB) Log(...any) {}
|
||||
func (nilTB) Skipf(string, ...any) { panic("call to TB.Skipf() outside a test") }
|
||||
func (nilTB) Skip(...any) { panic("call to TB.Skip() outside a test") }
|
||||
func (nilTB) SkipNow() { panic("call to TB.SkipNow() outside a test") }
|
||||
func (nilTB) Errorf(string, ...any) { panic("call to TB.Errorf() outside a test") }
|
||||
func (nilTB) Error(...any) { panic("call to TB.Error() outside a test") }
|
||||
func (nilTB) Fatalf(string, ...any) { panic("call to TB.Fatalf() outside a test") }
|
||||
func (nilTB) Fatal(...any) { panic("call to TB.Fatal() outside a test") }
|
||||
func (nilTB) FailNow() { panic("call to TB.FailNow() outside a test") }
|
||||
func (nilTB) Fail() { panic("call to TB.Fail() outside a test") }
|
||||
func (nilTB) Failed() bool { panic("call to TB.Failed() outside a test") }
|
||||
|
||||
// T is similar to [testing.T], but with extra bookkeeping for property-based tests.
|
||||
//
|
||||
// For tests to be reproducible, they should generally run in a single goroutine.
|
||||
// If concurrency is unavoidable, methods on *T, such as [*testing.T.Helper] and [*T.Errorf],
|
||||
// are safe for concurrent calls, but *Generator.Draw from a given *T is not.
|
||||
type T struct {
|
||||
tb // unnamed to force re-export of (*T).Helper()
|
||||
|
||||
ctx context.Context
|
||||
cancelCtx context.CancelFunc
|
||||
cleanups []func()
|
||||
cleaning atomic.Bool
|
||||
|
||||
tbLog bool
|
||||
rawLog *log.Logger
|
||||
s bitStream
|
||||
draws int
|
||||
refDraws []any
|
||||
mu sync.RWMutex
|
||||
failed stopTest
|
||||
}
|
||||
|
||||
func newT(tb tb, s bitStream, tbLog bool, rawLog *log.Logger, refDraws ...any) *T {
|
||||
if tb == nil {
|
||||
tb = nilTB{}
|
||||
}
|
||||
|
||||
t := &T{
|
||||
tb: tb,
|
||||
tbLog: tbLog,
|
||||
rawLog: rawLog,
|
||||
s: s,
|
||||
refDraws: refDraws,
|
||||
}
|
||||
|
||||
if rawLog == nil && flags.log {
|
||||
testName := "rapid test"
|
||||
if tb != nil {
|
||||
testName = tb.Name()
|
||||
}
|
||||
|
||||
t.rawLog = log.New(os.Stdout, fmt.Sprintf("[%v] ", testName), log.Lmsgprefix|log.Ldate|log.Ltime|log.Lmicroseconds)
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *T) shouldLog() bool {
|
||||
return t.rawLog != nil || t.tbLog
|
||||
}
|
||||
|
||||
// Context returns a context.Context that is canceled
|
||||
// after the property function exits,
|
||||
// before Cleanup-registered functions are run.
|
||||
//
|
||||
// For [Check], [MakeFuzz], and similar functions,
|
||||
// each call to the property function gets a unique context
|
||||
// that is canceled after that property function exits.
|
||||
//
|
||||
// For [Custom], each time a new value is generated,
|
||||
// the generator function gets a unique context
|
||||
// that is canceled after the generator function exits.
|
||||
func (t *T) Context() context.Context {
|
||||
// Fast path: no need to lock if the context is already set.
|
||||
t.mu.RLock()
|
||||
ctx := t.ctx
|
||||
t.mu.RUnlock()
|
||||
if ctx != nil {
|
||||
return ctx
|
||||
}
|
||||
|
||||
// If we're in the middle of cleaning up
|
||||
// and the context has already been canceled and cleared,
|
||||
// don't create a new one. Return a canceled context instead.
|
||||
if t.cleaning.Load() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cancel()
|
||||
return ctx
|
||||
}
|
||||
|
||||
// Slow path: lock and check again, create new context if needed.
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.ctx != nil {
|
||||
// Another goroutine set the context
|
||||
// while we were waiting for the lock.
|
||||
return t.ctx
|
||||
}
|
||||
|
||||
// Use the testing.TB's context as the starting point if available,
|
||||
// and the Background context if not.
|
||||
//
|
||||
// T.Context was added in Go 1.24.
|
||||
if tctx, ok := t.tb.(interface{ Context() context.Context }); ok {
|
||||
ctx = tctx.Context()
|
||||
} else {
|
||||
ctx = context.Background()
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
t.ctx = ctx
|
||||
t.cancelCtx = cancel
|
||||
return ctx
|
||||
}
|
||||
|
||||
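Illustrative sketch, not part of the vendored source: using the per-invocation context together with Cleanup inside a property. The property name is hypothetical and an os import is assumed.

func propWithContext(t *rapid.T) {
    ctx := t.Context() // canceled as soon as this property invocation returns

    dir, err := os.MkdirTemp("", "rapid-example")
    if err != nil {
        t.Fatal(err)
    }
    t.Cleanup(func() { os.RemoveAll(dir) }) // runs after ctx is canceled, LIFO order

    // ... run the code under test with ctx and dir ...
    _ = ctx
}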
// Cleanup registers a function to be called
|
||||
// when a property function finishes running.
|
||||
//
|
||||
// For [Check], [MakeFuzz], and similar functions,
|
||||
// each call to the property function registers its cleanup functions,
|
||||
// which are called after the property function exits.
|
||||
//
|
||||
// For [Custom], each time a new value is generated,
|
||||
// the generator function registers its cleanup functions,
|
||||
// which are called after the generator function exits.
|
||||
//
|
||||
// Cleanup functions are called in last-in, first-out order.
|
||||
//
|
||||
// If [T.Context] is used, the context is canceled
|
||||
// before the Cleanup functions are executed.
|
||||
func (t *T) Cleanup(f func()) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.cleanups = append(t.cleanups, f)
|
||||
}
|
||||
|
||||
// cleanup runs any cleanup tasks associated with the property check.
|
||||
// It is safe to call multiple times.
|
||||
func (t *T) cleanup() {
|
||||
t.cleaning.Store(true)
|
||||
defer t.cleaning.Store(false)
|
||||
|
||||
// If a cleanup function panics,
|
||||
// we still want to run the remaining cleanup functions.
|
||||
defer func() {
|
||||
t.mu.Lock()
|
||||
recurse := len(t.cleanups) > 0
|
||||
t.mu.Unlock()
|
||||
|
||||
if recurse {
|
||||
t.cleanup()
|
||||
}
|
||||
}()
|
||||
|
||||
// Context must be closed before t.Cleanup functions are run.
|
||||
t.mu.Lock()
|
||||
if t.cancelCtx != nil {
|
||||
t.cancelCtx()
|
||||
t.cancelCtx = nil
|
||||
t.ctx = nil
|
||||
}
|
||||
t.mu.Unlock()
|
||||
|
||||
for {
|
||||
var cleanup func()
|
||||
t.mu.Lock()
|
||||
if len(t.cleanups) > 0 {
|
||||
last := len(t.cleanups) - 1
|
||||
cleanup = t.cleanups[last]
|
||||
t.cleanups = t.cleanups[:last]
|
||||
}
|
||||
t.mu.Unlock()
|
||||
|
||||
if cleanup == nil {
|
||||
break
|
||||
}
|
||||
|
||||
cleanup()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *T) Logf(format string, args ...any) {
|
||||
if t.rawLog != nil {
|
||||
t.rawLog.Printf(format, args...)
|
||||
} else if t.tbLog {
|
||||
t.tb.Helper()
|
||||
t.tb.Logf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *T) Log(args ...any) {
|
||||
if t.rawLog != nil {
|
||||
t.rawLog.Print(args...)
|
||||
} else if t.tbLog {
|
||||
t.tb.Helper()
|
||||
t.tb.Log(args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Skipf is equivalent to [T.Logf] followed by [T.SkipNow].
|
||||
func (t *T) Skipf(format string, args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Logf(format, args...)
|
||||
t.skip(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Skip is equivalent to [T.Log] followed by [T.SkipNow].
|
||||
func (t *T) Skip(args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Log(args...)
|
||||
t.skip(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// SkipNow marks the current test case as invalid (except in [T.Repeat]
|
||||
// actions, where it marks current action as non-applicable instead).
|
||||
// If too many test cases are skipped, rapid will mark the test as failing
|
||||
// due to inability to generate enough valid test cases.
|
||||
//
|
||||
// Prefer *Generator.Filter to SkipNow, and prefer generators that always produce
|
||||
// valid test cases to Filter.
|
||||
func (t *T) SkipNow() {
|
||||
t.skip("(*T).SkipNow() called")
|
||||
}
|
||||
|
||||
// Errorf is equivalent to [T.Logf] followed by [T.Fail].
|
||||
func (t *T) Errorf(format string, args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Logf(format, args...)
|
||||
t.fail(false, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Error is equivalent to [T.Log] followed by [T.Fail].
|
||||
func (t *T) Error(args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Log(args...)
|
||||
t.fail(false, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Fatalf is equivalent to [T.Logf] followed by [T.FailNow].
|
||||
func (t *T) Fatalf(format string, args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Logf(format, args...)
|
||||
t.fail(true, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
// Fatal is equivalent to [T.Log] followed by [T.FailNow].
|
||||
func (t *T) Fatal(args ...any) {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Log(args...)
|
||||
t.fail(true, fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
func (t *T) FailNow() {
|
||||
t.fail(true, "(*T).FailNow() called")
|
||||
}
|
||||
|
||||
func (t *T) Fail() {
|
||||
t.fail(false, "(*T).Fail() called")
|
||||
}
|
||||
|
||||
func (t *T) Failed() bool {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
|
||||
return t.failed != ""
|
||||
}
|
||||
|
||||
func (t *T) skip(msg string) {
|
||||
panic(invalidData(msg))
|
||||
}
|
||||
|
||||
func (t *T) fail(now bool, msg string) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
t.failed = stopTest(msg)
|
||||
if now {
|
||||
panic(t.failed)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *T) failOnError() {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
|
||||
if t.failed != "" {
|
||||
panic(t.failed)
|
||||
}
|
||||
}
|
||||
274
vendor/pgregory.net/rapid/floats.go
generated
vendored
Normal file
@@ -0,0 +1,274 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
float32ExpBits = 8
|
||||
float32SignifBits = 23
|
||||
|
||||
float64ExpBits = 11
|
||||
float64SignifBits = 52
|
||||
|
||||
floatExpLabel = "floatexp"
|
||||
floatSignifLabel = "floatsignif"
|
||||
)
|
||||
|
||||
// Float32 is a shorthand for [Float32Range](-[math.MaxFloat32], [math.MaxFloat32]).
|
||||
func Float32() *Generator[float32] {
|
||||
return Float32Range(-math.MaxFloat32, math.MaxFloat32)
|
||||
}
|
||||
|
||||
// Float32Min is a shorthand for [Float32Range](min, [math.MaxFloat32]).
|
||||
func Float32Min(min float32) *Generator[float32] {
|
||||
return Float32Range(min, math.MaxFloat32)
|
||||
}
|
||||
|
||||
// Float32Max is a shorthand for [Float32Range](-[math.MaxFloat32], max).
|
||||
func Float32Max(max float32) *Generator[float32] {
|
||||
return Float32Range(-math.MaxFloat32, max)
|
||||
}
|
||||
|
||||
// Float32Range creates a generator of 32-bit floating-point numbers in range [min, max].
|
||||
// Both min and max can be infinite.
|
||||
func Float32Range(min float32, max float32) *Generator[float32] {
|
||||
assertf(min == min, "min should not be a NaN")
|
||||
assertf(max == max, "max should not be a NaN")
|
||||
assertf(min <= max, "invalid range [%v, %v]", min, max)
|
||||
|
||||
return newGenerator[float32](&float32Gen{
|
||||
floatGen{
|
||||
min: float64(min),
|
||||
max: float64(max),
|
||||
minVal: -math.MaxFloat32,
|
||||
maxVal: math.MaxFloat32,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Float64 is a shorthand for [Float64Range](-[math.MaxFloat64], [math.MaxFloat64]).
|
||||
func Float64() *Generator[float64] {
|
||||
return Float64Range(-math.MaxFloat64, math.MaxFloat64)
|
||||
}
|
||||
|
||||
// Float64Min is a shorthand for [Float64Range](min, [math.MaxFloat64]).
|
||||
func Float64Min(min float64) *Generator[float64] {
|
||||
return Float64Range(min, math.MaxFloat64)
|
||||
}
|
||||
|
||||
// Float64Max is a shorthand for [Float64Range](-[math.MaxFloat64], max).
|
||||
func Float64Max(max float64) *Generator[float64] {
|
||||
return Float64Range(-math.MaxFloat64, max)
|
||||
}
|
||||
|
||||
// Float64Range creates a generator of 64-bit floating-point numbers in range [min, max].
|
||||
// Both min and max can be infinite.
|
||||
func Float64Range(min float64, max float64) *Generator[float64] {
|
||||
assertf(min == min, "min should not be a NaN")
|
||||
assertf(max == max, "max should not be a NaN")
|
||||
assertf(min <= max, "invalid range [%v, %v]", min, max)
|
||||
|
||||
return newGenerator[float64](&float64Gen{
|
||||
floatGen{
|
||||
min: min,
|
||||
max: max,
|
||||
minVal: -math.MaxFloat64,
|
||||
maxVal: math.MaxFloat64,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
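Illustrative sketch, not part of the vendored source: a bounded float draw (hypothetical property function name).

func propFloat(t *rapid.T) {
    p := rapid.Float64Range(0, 1).Draw(t, "p")
    if p < 0 || p > 1 {
        t.Fatalf("p = %v outside [0, 1]", p)
    }
}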
type floatGen struct {
|
||||
min float64
|
||||
max float64
|
||||
minVal float64
|
||||
maxVal float64
|
||||
}
|
||||
type float32Gen struct{ floatGen }
|
||||
type float64Gen struct{ floatGen }
|
||||
|
||||
func (g *floatGen) stringImpl(kind string) string {
|
||||
if g.min != g.minVal && g.max != g.maxVal {
|
||||
return fmt.Sprintf("%sRange(%g, %g)", kind, g.min, g.max)
|
||||
} else if g.min != g.minVal {
|
||||
return fmt.Sprintf("%sMin(%g)", kind, g.min)
|
||||
} else if g.max != g.maxVal {
|
||||
return fmt.Sprintf("%sMax(%g)", kind, g.max)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s()", kind)
|
||||
}
|
||||
func (g *float32Gen) String() string {
|
||||
return g.stringImpl("Float32")
|
||||
}
|
||||
func (g *float64Gen) String() string {
|
||||
return g.stringImpl("Float64")
|
||||
}
|
||||
|
||||
func (g *float32Gen) value(t *T) float32 {
|
||||
return float32FromParts(genFloatRange(t.s, g.min, g.max, float32SignifBits))
|
||||
}
|
||||
func (g *float64Gen) value(t *T) float64 {
|
||||
return float64FromParts(genFloatRange(t.s, g.min, g.max, float64SignifBits))
|
||||
}
|
||||
|
||||
func ufloatFracBits(e int32, signifBits uint) uint {
|
||||
if e <= 0 {
|
||||
return signifBits
|
||||
} else if uint(e) < signifBits {
|
||||
return signifBits - uint(e)
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func ufloat32Parts(f float32) (int32, uint64, uint64) {
|
||||
u := math.Float32bits(f) & math.MaxInt32
|
||||
|
||||
e := int32(u>>float32SignifBits) - int32(bitmask64(float32ExpBits-1))
|
||||
s := uint64(u) & bitmask64(float32SignifBits)
|
||||
n := ufloatFracBits(e, float32SignifBits)
|
||||
|
||||
return e, s >> n, s & bitmask64(n)
|
||||
}
|
||||
|
||||
func ufloat64Parts(f float64) (int32, uint64, uint64) {
|
||||
u := math.Float64bits(f) & math.MaxInt64
|
||||
|
||||
e := int32(u>>float64SignifBits) - int32(bitmask64(float64ExpBits-1))
|
||||
s := u & bitmask64(float64SignifBits)
|
||||
n := ufloatFracBits(e, float64SignifBits)
|
||||
|
||||
return e, s >> n, s & bitmask64(n)
|
||||
}
|
||||
|
||||
func ufloat32FromParts(e int32, si uint64, sf uint64) float32 {
|
||||
e_ := (uint32(e) + uint32(bitmask64(float32ExpBits-1))) << float32SignifBits
|
||||
s_ := (uint32(si) << ufloatFracBits(e, float32SignifBits)) | uint32(sf)
|
||||
|
||||
return math.Float32frombits(e_ | s_)
|
||||
}
|
||||
|
||||
func ufloat64FromParts(e int32, si uint64, sf uint64) float64 {
|
||||
e_ := (uint64(e) + bitmask64(float64ExpBits-1)) << float64SignifBits
|
||||
s_ := (si << ufloatFracBits(e, float64SignifBits)) | sf
|
||||
|
||||
return math.Float64frombits(e_ | s_)
|
||||
}
|
||||
|
||||
func float32FromParts(sign bool, e int32, si uint64, sf uint64) float32 {
|
||||
f := ufloat32FromParts(e, si, sf)
|
||||
if sign {
|
||||
return -f
|
||||
} else {
|
||||
return f
|
||||
}
|
||||
}
|
||||
|
||||
func float64FromParts(sign bool, e int32, si uint64, sf uint64) float64 {
|
||||
f := ufloat64FromParts(e, si, sf)
|
||||
if sign {
|
||||
return -f
|
||||
} else {
|
||||
return f
|
||||
}
|
||||
}
|
||||
|
||||
func genUfloatRange(s bitStream, min float64, max float64, signifBits uint) (int32, uint64, uint64) {
|
||||
assert(min >= 0 && min <= max)
|
||||
|
||||
var (
|
||||
minExp, maxExp int32
|
||||
minSignifI, maxSignifI, minSignifF, maxSignifF uint64
|
||||
)
|
||||
if signifBits == float32SignifBits {
|
||||
minExp, minSignifI, minSignifF = ufloat32Parts(float32(min))
|
||||
maxExp, maxSignifI, maxSignifF = ufloat32Parts(float32(max))
|
||||
} else {
|
||||
minExp, minSignifI, minSignifF = ufloat64Parts(min)
|
||||
maxExp, maxSignifI, maxSignifF = ufloat64Parts(max)
|
||||
}
|
||||
|
||||
i := s.beginGroup(floatExpLabel, false)
|
||||
e, lOverflow, rOverflow := genIntRange(s, int64(minExp), int64(maxExp), true)
|
||||
s.endGroup(i, false)
|
||||
|
||||
fracBits := ufloatFracBits(int32(e), signifBits)
|
||||
|
||||
j := s.beginGroup(floatSignifLabel, false)
|
||||
var siMin, siMax uint64
|
||||
switch {
|
||||
case lOverflow:
|
||||
siMin, siMax = minSignifI, minSignifI
|
||||
case rOverflow:
|
||||
siMin, siMax = maxSignifI, maxSignifI
|
||||
case minExp == maxExp:
|
||||
siMin, siMax = minSignifI, maxSignifI
|
||||
case int32(e) == minExp:
|
||||
siMin, siMax = minSignifI, bitmask64(signifBits-fracBits)
|
||||
case int32(e) == maxExp:
|
||||
siMin, siMax = 0, maxSignifI
|
||||
default:
|
||||
siMin, siMax = 0, bitmask64(signifBits-fracBits)
|
||||
}
|
||||
si, _, _ := genUintRange(s, siMin, siMax, false)
|
||||
var sfMin, sfMax uint64
|
||||
switch {
|
||||
case lOverflow:
|
||||
sfMin, sfMax = minSignifF, minSignifF
|
||||
case rOverflow:
|
||||
sfMin, sfMax = maxSignifF, maxSignifF
|
||||
case minExp == maxExp && minSignifI == maxSignifI:
|
||||
sfMin, sfMax = minSignifF, maxSignifF
|
||||
case int32(e) == minExp && si == minSignifI:
|
||||
sfMin, sfMax = minSignifF, bitmask64(fracBits)
|
||||
case int32(e) == maxExp && si == maxSignifI:
|
||||
sfMin, sfMax = 0, maxSignifF
|
||||
default:
|
||||
sfMin, sfMax = 0, bitmask64(fracBits)
|
||||
}
|
||||
maxR := bits.Len64(sfMax - sfMin)
|
||||
r := genUintNNoReject(s, uint64(maxR))
|
||||
sf, _, _ := genUintRange(s, sfMin, sfMax, false)
|
||||
s.endGroup(j, false)
|
||||
|
||||
for i := uint(0); i < uint(maxR)-uint(r); i++ {
|
||||
mask := ^(uint64(1) << i)
|
||||
if sf&mask < sfMin {
|
||||
break
|
||||
}
|
||||
sf &= mask
|
||||
}
|
||||
|
||||
return int32(e), si, sf
|
||||
}
|
||||
|
||||
func genFloatRange(s bitStream, min float64, max float64, signifBits uint) (bool, int32, uint64, uint64) {
|
||||
var posMin, negMin, pNeg float64
|
||||
if min >= 0 {
|
||||
posMin = min
|
||||
pNeg = 0
|
||||
} else if max <= 0 {
|
||||
negMin = -max
|
||||
pNeg = 1
|
||||
} else {
|
||||
pNeg = 0.5
|
||||
}
|
||||
|
||||
if flipBiasedCoin(s, pNeg) {
|
||||
e, si, sf := genUfloatRange(s, negMin, -min, signifBits)
|
||||
return true, e, si, sf
|
||||
} else {
|
||||
e, si, sf := genUfloatRange(s, posMin, max, signifBits)
|
||||
return false, e, si, sf
|
||||
}
|
||||
}
|
||||
121
vendor/pgregory.net/rapid/generator.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type generatorImpl[V any] interface {
|
||||
String() string
|
||||
value(t *T) V
|
||||
}
|
||||
|
||||
// Generator describes a generator of values of type V.
|
||||
type Generator[V any] struct {
|
||||
impl generatorImpl[V]
|
||||
strOnce sync.Once
|
||||
str string
|
||||
}
|
||||
|
||||
func newGenerator[V any](impl generatorImpl[V]) *Generator[V] {
|
||||
return &Generator[V]{
|
||||
impl: impl,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *Generator[V]) String() string {
|
||||
g.strOnce.Do(func() {
|
||||
g.str = g.impl.String()
|
||||
})
|
||||
|
||||
return g.str
|
||||
}
|
||||
|
||||
// Draw produces a value from the generator.
|
||||
func (g *Generator[V]) Draw(t *T, label string) V {
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
|
||||
v := g.value(t)
|
||||
|
||||
if len(t.refDraws) > 0 {
|
||||
ref := t.refDraws[t.draws]
|
||||
if !reflect.DeepEqual(v, ref) {
|
||||
t.tb.Fatalf("draw %v differs: %#v vs expected %#v", t.draws, v, ref)
|
||||
}
|
||||
}
|
||||
|
||||
if t.tbLog || t.rawLog != nil {
|
||||
if label == "" {
|
||||
label = fmt.Sprintf("#%v", t.draws)
|
||||
}
|
||||
|
||||
if t.tbLog {
|
||||
t.tb.Helper()
|
||||
}
|
||||
t.Logf("[rapid] draw %v: %#v", label, v)
|
||||
}
|
||||
|
||||
t.draws++
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (g *Generator[V]) value(t *T) V {
|
||||
i := t.s.beginGroup(g.str, true)
|
||||
v := g.impl.value(t)
|
||||
t.s.endGroup(i, false)
|
||||
return v
|
||||
}
|
||||
|
||||
// Example produces an example value from the generator. If seed is provided, value is produced deterministically
|
||||
// based on seed. Example should only be used for examples; always use *Generator.Draw in property-based tests.
|
||||
func (g *Generator[V]) Example(seed ...int) V {
|
||||
s := baseSeed()
|
||||
if len(seed) > 0 {
|
||||
s = uint64(seed[0])
|
||||
}
|
||||
|
||||
v, n, err := example(g, newT(nil, newRandomBitStream(s, false), false, nil))
|
||||
assertf(err == nil, "%v failed to generate an example in %v tries: %v", g, n, err)
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// Filter creates a generator producing only values from g for which fn returns true.
|
||||
func (g *Generator[V]) Filter(fn func(V) bool) *Generator[V] {
|
||||
return filter(g, fn)
|
||||
}
|
||||
|
||||
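Illustrative sketch, not part of the vendored source: Filter derives a constrained generator, and Example peeks at a value outside of a property (hypothetical helper names; an fmt import is assumed).

func oddGen() *rapid.Generator[int] {
    return rapid.Int().Filter(func(n int) bool { return n%2 != 0 })
}

func printOddExample() {
    fmt.Println(oddGen().Example()) // deterministic when called as oddGen().Example(1)
    // Inside a property, draw instead: n := oddGen().Draw(t, "n")
}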
// AsAny creates a generator producing values from g converted to any.
|
||||
func (g *Generator[V]) AsAny() *Generator[any] {
|
||||
return asAny(g)
|
||||
}
|
||||
|
||||
func example[V any](g *Generator[V], t *T) (V, int, error) {
|
||||
defer t.cleanup()
|
||||
|
||||
for i := 1; ; i++ {
|
||||
r, err := recoverValue(g, t)
|
||||
if err == nil {
|
||||
return r, i, nil
|
||||
} else if i == exampleMaxTries {
|
||||
var zero V
|
||||
return zero, i, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func recoverValue[V any](g *Generator[V], t *T) (v V, err *testError) {
|
||||
defer func() { err = panicToError(recover(), 3) }()
|
||||
|
||||
return g.value(t), nil
|
||||
}
|
||||
272
vendor/pgregory.net/rapid/integers.go
generated
vendored
Normal file
@@ -0,0 +1,272 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
byteKind = "Byte"
|
||||
intKind = "Int"
|
||||
int8Kind = "Int8"
|
||||
int16Kind = "Int16"
|
||||
int32Kind = "Int32"
|
||||
int64Kind = "Int64"
|
||||
uintKind = "Uint"
|
||||
uint8Kind = "Uint8"
|
||||
uint16Kind = "Uint16"
|
||||
uint32Kind = "Uint32"
|
||||
uint64Kind = "Uint64"
|
||||
uintptrKind = "Uintptr"
|
||||
|
||||
uintptrSize = 32 << (^uintptr(0) >> 32 & 1)
|
||||
uintSize = 32 << (^uint(0) >> 32 & 1)
|
||||
intSize = uintSize
|
||||
|
||||
maxUintptr = 1<<(uint(uintptrSize)) - 1
|
||||
)
|
||||
|
||||
var (
|
||||
integerKindToInfo = map[string]integerKindInfo{
|
||||
byteKind: {size: 1, umax: math.MaxUint8},
|
||||
intKind: {signed: true, size: intSize / 8, smin: math.MinInt, smax: math.MaxInt},
|
||||
int8Kind: {signed: true, size: 1, smin: math.MinInt8, smax: math.MaxInt8},
|
||||
int16Kind: {signed: true, size: 2, smin: math.MinInt16, smax: math.MaxInt16},
|
||||
int32Kind: {signed: true, size: 4, smin: math.MinInt32, smax: math.MaxInt32},
|
||||
int64Kind: {signed: true, size: 8, smin: math.MinInt64, smax: math.MaxInt64},
|
||||
uintKind: {size: uintSize / 8, umax: math.MaxUint},
|
||||
uint8Kind: {size: 1, umax: math.MaxUint8},
|
||||
uint16Kind: {size: 2, umax: math.MaxUint16},
|
||||
uint32Kind: {size: 4, umax: math.MaxUint32},
|
||||
uint64Kind: {size: 8, umax: math.MaxUint64},
|
||||
uintptrKind: {size: uintptrSize / 8, umax: maxUintptr},
|
||||
}
|
||||
)
|
||||
|
||||
type integer interface {
|
||||
~int | ~int8 | ~int16 | ~int32 | ~int64 |
|
||||
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
|
||||
}
|
||||
|
||||
type integerKindInfo struct {
|
||||
signed bool
|
||||
size int
|
||||
smin int64
|
||||
smax int64
|
||||
umax uint64
|
||||
}
|
||||
|
||||
type boolGen struct{}
|
||||
|
||||
func Bool() *Generator[bool] { return newGenerator[bool](&boolGen{}) }
|
||||
func (g *boolGen) String() string { return "Bool()" }
|
||||
func (g *boolGen) value(t *T) bool { return t.s.drawBits(1) == 1 }
|
||||
|
||||
func Byte() *Generator[byte] { return newIntegerGen[byte](byteKind) }
|
||||
func Int() *Generator[int] { return newIntegerGen[int](intKind) }
|
||||
func Int8() *Generator[int8] { return newIntegerGen[int8](int8Kind) }
|
||||
func Int16() *Generator[int16] { return newIntegerGen[int16](int16Kind) }
|
||||
func Int32() *Generator[int32] { return newIntegerGen[int32](int32Kind) }
|
||||
func Int64() *Generator[int64] { return newIntegerGen[int64](int64Kind) }
|
||||
func Uint() *Generator[uint] { return newIntegerGen[uint](uintKind) }
|
||||
func Uint8() *Generator[uint8] { return newIntegerGen[uint8](uint8Kind) }
|
||||
func Uint16() *Generator[uint16] { return newIntegerGen[uint16](uint16Kind) }
|
||||
func Uint32() *Generator[uint32] { return newIntegerGen[uint32](uint32Kind) }
|
||||
func Uint64() *Generator[uint64] { return newIntegerGen[uint64](uint64Kind) }
|
||||
func Uintptr() *Generator[uintptr] { return newIntegerGen[uintptr](uintptrKind) }
|
||||
|
||||
func ByteMin(min byte) *Generator[byte] { return newUintMinGen[byte](byteKind, uint64(min)) }
|
||||
func IntMin(min int) *Generator[int] { return newIntMinGen[int](intKind, int64(min)) }
|
||||
func Int8Min(min int8) *Generator[int8] { return newIntMinGen[int8](int8Kind, int64(min)) }
|
||||
func Int16Min(min int16) *Generator[int16] { return newIntMinGen[int16](int16Kind, int64(min)) }
|
||||
func Int32Min(min int32) *Generator[int32] { return newIntMinGen[int32](int32Kind, int64(min)) }
|
||||
func Int64Min(min int64) *Generator[int64] { return newIntMinGen[int64](int64Kind, min) }
|
||||
func UintMin(min uint) *Generator[uint] { return newUintMinGen[uint](uintKind, uint64(min)) }
|
||||
func Uint8Min(min uint8) *Generator[uint8] { return newUintMinGen[uint8](uint8Kind, uint64(min)) }
|
||||
func Uint16Min(min uint16) *Generator[uint16] { return newUintMinGen[uint16](uint16Kind, uint64(min)) }
|
||||
func Uint32Min(min uint32) *Generator[uint32] { return newUintMinGen[uint32](uint32Kind, uint64(min)) }
|
||||
func Uint64Min(min uint64) *Generator[uint64] { return newUintMinGen[uint64](uint64Kind, min) }
|
||||
func UintptrMin(min uintptr) *Generator[uintptr] {
|
||||
return newUintMinGen[uintptr](uintptrKind, uint64(min))
|
||||
}
|
||||
|
||||
func ByteMax(max byte) *Generator[byte] { return newUintMaxGen[byte](byteKind, uint64(max)) }
|
||||
func IntMax(max int) *Generator[int] { return newIntMaxGen[int](intKind, int64(max)) }
|
||||
func Int8Max(max int8) *Generator[int8] { return newIntMaxGen[int8](int8Kind, int64(max)) }
|
||||
func Int16Max(max int16) *Generator[int16] { return newIntMaxGen[int16](int16Kind, int64(max)) }
|
||||
func Int32Max(max int32) *Generator[int32] { return newIntMaxGen[int32](int32Kind, int64(max)) }
|
||||
func Int64Max(max int64) *Generator[int64] { return newIntMaxGen[int64](int64Kind, max) }
|
||||
func UintMax(max uint) *Generator[uint] { return newUintMaxGen[uint](uintKind, uint64(max)) }
|
||||
func Uint8Max(max uint8) *Generator[uint8] { return newUintMaxGen[uint8](uint8Kind, uint64(max)) }
|
||||
func Uint16Max(max uint16) *Generator[uint16] { return newUintMaxGen[uint16](uint16Kind, uint64(max)) }
|
||||
func Uint32Max(max uint32) *Generator[uint32] { return newUintMaxGen[uint32](uint32Kind, uint64(max)) }
|
||||
func Uint64Max(max uint64) *Generator[uint64] { return newUintMaxGen[uint64](uint64Kind, max) }
|
||||
func UintptrMax(max uintptr) *Generator[uintptr] {
|
||||
return newUintMaxGen[uintptr](uintptrKind, uint64(max))
|
||||
}
|
||||
|
||||
func ByteRange(min byte, max byte) *Generator[byte] {
|
||||
return newUintRangeGen[byte](byteKind, uint64(min), uint64(max))
|
||||
}
|
||||
func IntRange(min int, max int) *Generator[int] {
|
||||
return newIntRangeGen[int](intKind, int64(min), int64(max))
|
||||
}
|
||||
func Int8Range(min int8, max int8) *Generator[int8] {
|
||||
return newIntRangeGen[int8](int8Kind, int64(min), int64(max))
|
||||
}
|
||||
func Int16Range(min int16, max int16) *Generator[int16] {
|
||||
return newIntRangeGen[int16](int16Kind, int64(min), int64(max))
|
||||
}
|
||||
func Int32Range(min int32, max int32) *Generator[int32] {
|
||||
return newIntRangeGen[int32](int32Kind, int64(min), int64(max))
|
||||
}
|
||||
func Int64Range(min int64, max int64) *Generator[int64] {
|
||||
return newIntRangeGen[int64](int64Kind, min, max)
|
||||
}
|
||||
func UintRange(min uint, max uint) *Generator[uint] {
|
||||
return newUintRangeGen[uint](uintKind, uint64(min), uint64(max))
|
||||
}
|
||||
func Uint8Range(min uint8, max uint8) *Generator[uint8] {
|
||||
return newUintRangeGen[uint8](uint8Kind, uint64(min), uint64(max))
|
||||
}
|
||||
func Uint16Range(min uint16, max uint16) *Generator[uint16] {
|
||||
return newUintRangeGen[uint16](uint16Kind, uint64(min), uint64(max))
|
||||
}
|
||||
func Uint32Range(min uint32, max uint32) *Generator[uint32] {
|
||||
return newUintRangeGen[uint32](uint32Kind, uint64(min), uint64(max))
|
||||
}
|
||||
func Uint64Range(min uint64, max uint64) *Generator[uint64] {
|
||||
return newUintRangeGen[uint64](uint64Kind, min, max)
|
||||
}
|
||||
func UintptrRange(min uintptr, max uintptr) *Generator[uintptr] {
|
||||
return newUintRangeGen[uintptr](uintptrKind, uint64(min), uint64(max))
|
||||
}
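
A minimal usage sketch for the range constructors above (not part of the vendored file), assuming the standard testing package and the public rapid API; the test name and bounds are illustrative:

package example

import (
	"testing"

	"pgregory.net/rapid"
)

func TestIntRangeUsage(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		// Draw an int in [1024, 65535]; Draw records the value under the given label.
		port := rapid.IntRange(1024, 65535).Draw(t, "port")
		if port < 1024 || port > 65535 {
			t.Fatalf("port %d outside requested range", port)
		}
	})
}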
|
||||
|
||||
func newIntegerGen[I integer](kind string) *Generator[I] {
|
||||
return newGenerator[I](&integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
})
|
||||
}
|
||||
|
||||
func newIntRangeGen[I integer](kind string, min int64, max int64) *Generator[I] {
|
||||
assertf(min <= max, "invalid integer range [%v, %v]", min, max)
|
||||
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMin: true,
|
||||
hasMax: true,
|
||||
}
|
||||
g.smin = min
|
||||
g.smax = max
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
func newIntMinGen[I integer](kind string, min int64) *Generator[I] {
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMin: true,
|
||||
}
|
||||
g.smin = min
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
func newIntMaxGen[I integer](kind string, max int64) *Generator[I] {
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMax: true,
|
||||
}
|
||||
g.smax = max
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
func newUintRangeGen[I integer](kind string, min uint64, max uint64) *Generator[I] {
|
||||
assertf(min <= max, "invalid integer range [%v, %v]", min, max)
|
||||
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMin: true,
|
||||
hasMax: true,
|
||||
}
|
||||
g.umin = min
|
||||
g.umax = max
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
func newUintMinGen[I integer](kind string, min uint64) *Generator[I] {
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMin: true,
|
||||
}
|
||||
g.umin = min
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
func newUintMaxGen[I integer](kind string, max uint64) *Generator[I] {
|
||||
g := &integerGen[I]{
|
||||
integerKindInfo: integerKindToInfo[kind],
|
||||
kind: kind,
|
||||
hasMax: true,
|
||||
}
|
||||
g.umax = max
|
||||
|
||||
return newGenerator[I](g)
|
||||
}
|
||||
|
||||
type integerGen[I integer] struct {
|
||||
integerKindInfo
|
||||
kind string
|
||||
umin uint64
|
||||
hasMin bool
|
||||
hasMax bool
|
||||
}
|
||||
|
||||
func (g *integerGen[I]) String() string {
|
||||
if g.hasMin && g.hasMax {
|
||||
if g.signed {
|
||||
return fmt.Sprintf("%sRange(%d, %d)", g.kind, g.smin, g.smax)
|
||||
} else {
|
||||
return fmt.Sprintf("%sRange(%d, %d)", g.kind, g.umin, g.umax)
|
||||
}
|
||||
} else if g.hasMin {
|
||||
if g.signed {
|
||||
return fmt.Sprintf("%sMin(%d)", g.kind, g.smin)
|
||||
} else {
|
||||
return fmt.Sprintf("%sMin(%d)", g.kind, g.umin)
|
||||
}
|
||||
} else if g.hasMax {
|
||||
if g.signed {
|
||||
return fmt.Sprintf("%sMax(%d)", g.kind, g.smax)
|
||||
} else {
|
||||
return fmt.Sprintf("%sMax(%d)", g.kind, g.umax)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s()", g.kind)
|
||||
}
|
||||
|
||||
func (g *integerGen[I]) value(t *T) I {
|
||||
if g.signed {
|
||||
i, _, _ := genIntRange(t.s, g.smin, g.smax, true)
|
||||
return I(i)
|
||||
} else {
|
||||
u, _, _ := genUintRange(t.s, g.umin, g.umax, true)
|
||||
return I(u)
|
||||
}
|
||||
}
|
||||
196
vendor/pgregory.net/rapid/make.go
generated
vendored
Normal file

@@ -0,0 +1,196 @@
|
||||
// Copyright 2022 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Make creates a generator of values of type V, using reflection to infer the required structure.
|
||||
// Currently, Make may be unable to terminate generation of values of some recursive types, thus using
|
||||
// Make with recursive types requires extra care.
|
||||
func Make[V any]() *Generator[V] {
|
||||
var zero V
|
||||
gen := newMakeGen(reflect.TypeOf(zero))
|
||||
return newGenerator[V](&makeGen[V]{
|
||||
gen: gen,
|
||||
})
|
||||
}
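
A minimal sketch of Make in use (not part of the vendored file), assuming a small struct with exported fields; the type and test are illustrative:

package example

import (
	"testing"

	"pgregory.net/rapid"
)

type point struct {
	X, Y int // exported fields, so reflection can set them
}

func TestMakeUsage(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		p := rapid.Make[point]().Draw(t, "point")
		t.Logf("generated %+v", p)
	})
}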
|
||||
|
||||
type makeGen[V any] struct {
|
||||
gen *Generator[any]
|
||||
}
|
||||
|
||||
func (g *makeGen[V]) String() string {
|
||||
var zero V
|
||||
return fmt.Sprintf("Make[%T]()", zero)
|
||||
}
|
||||
|
||||
func (g *makeGen[V]) value(t *T) V {
|
||||
return g.gen.value(t).(V)
|
||||
}
|
||||
|
||||
func newMakeGen(typ reflect.Type) *Generator[any] {
|
||||
gen, mayNeedCast := newMakeKindGen(typ)
|
||||
if !mayNeedCast || typ.String() == typ.Kind().String() {
|
||||
return gen // fast path with less reflect
|
||||
}
|
||||
return newGenerator[any](&castGen{gen, typ})
|
||||
}
|
||||
|
||||
type castGen struct {
|
||||
gen *Generator[any]
|
||||
typ reflect.Type
|
||||
}
|
||||
|
||||
func (g *castGen) String() string {
|
||||
return fmt.Sprintf("cast(%v, %v)", g.gen, g.typ.Name())
|
||||
}
|
||||
|
||||
func (g *castGen) value(t *T) any {
|
||||
v := g.gen.value(t)
|
||||
return reflect.ValueOf(v).Convert(g.typ).Interface()
|
||||
}
|
||||
|
||||
func newMakeKindGen(typ reflect.Type) (gen *Generator[any], mayNeedCast bool) {
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return Bool().AsAny(), true
|
||||
case reflect.Int:
|
||||
return Int().AsAny(), true
|
||||
case reflect.Int8:
|
||||
return Int8().AsAny(), true
|
||||
case reflect.Int16:
|
||||
return Int16().AsAny(), true
|
||||
case reflect.Int32:
|
||||
return Int32().AsAny(), true
|
||||
case reflect.Int64:
|
||||
return Int64().AsAny(), true
|
||||
case reflect.Uint:
|
||||
return Uint().AsAny(), true
|
||||
case reflect.Uint8:
|
||||
return Uint8().AsAny(), true
|
||||
case reflect.Uint16:
|
||||
return Uint16().AsAny(), true
|
||||
case reflect.Uint32:
|
||||
return Uint32().AsAny(), true
|
||||
case reflect.Uint64:
|
||||
return Uint64().AsAny(), true
|
||||
case reflect.Uintptr:
|
||||
return Uintptr().AsAny(), true
|
||||
case reflect.Float32:
|
||||
return Float32().AsAny(), true
|
||||
case reflect.Float64:
|
||||
return Float64().AsAny(), true
|
||||
case reflect.Array:
|
||||
return genAnyArray(typ), false
|
||||
case reflect.Map:
|
||||
return genAnyMap(typ), false
|
||||
case reflect.Pointer:
|
||||
return Deferred(func() *Generator[any] { return genAnyPointer(typ) }), false
|
||||
case reflect.Slice:
|
||||
return genAnySlice(typ), false
|
||||
case reflect.String:
|
||||
return String().AsAny(), true
|
||||
case reflect.Struct:
|
||||
return genAnyStruct(typ), false
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported type kind for Make: %v", typ.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func genAnyPointer(typ reflect.Type) *Generator[any] {
|
||||
elem := typ.Elem()
|
||||
elemGen := newMakeGen(elem)
|
||||
const pNonNil = 0.5
|
||||
|
||||
return Custom[any](func(t *T) any {
|
||||
if flipBiasedCoin(t.s, pNonNil) {
|
||||
val := elemGen.value(t)
|
||||
ptr := reflect.New(elem)
|
||||
ptr.Elem().Set(reflect.ValueOf(val))
|
||||
return ptr.Interface()
|
||||
} else {
|
||||
return reflect.Zero(typ).Interface()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func genAnyArray(typ reflect.Type) *Generator[any] {
|
||||
count := typ.Len()
|
||||
elemGen := newMakeGen(typ.Elem())
|
||||
|
||||
return Custom[any](func(t *T) any {
|
||||
a := reflect.Indirect(reflect.New(typ))
|
||||
if count == 0 {
|
||||
t.s.drawBits(0)
|
||||
} else {
|
||||
for i := 0; i < count; i++ {
|
||||
e := reflect.ValueOf(elemGen.value(t))
|
||||
a.Index(i).Set(e)
|
||||
}
|
||||
}
|
||||
return a.Interface()
|
||||
})
|
||||
}
|
||||
|
||||
func genAnySlice(typ reflect.Type) *Generator[any] {
|
||||
elemGen := newMakeGen(typ.Elem())
|
||||
|
||||
return Custom[any](func(t *T) any {
|
||||
repeat := newRepeat(-1, -1, -1, elemGen.String())
|
||||
sl := reflect.MakeSlice(typ, 0, repeat.avg())
|
||||
for repeat.more(t.s) {
|
||||
e := reflect.ValueOf(elemGen.value(t))
|
||||
sl = reflect.Append(sl, e)
|
||||
}
|
||||
return sl.Interface()
|
||||
})
|
||||
}
|
||||
|
||||
func genAnyMap(typ reflect.Type) *Generator[any] {
|
||||
keyGen := newMakeGen(typ.Key())
|
||||
valGen := newMakeGen(typ.Elem())
|
||||
|
||||
return Custom[any](func(t *T) any {
|
||||
label := keyGen.String() + "," + valGen.String()
|
||||
repeat := newRepeat(-1, -1, -1, label)
|
||||
m := reflect.MakeMapWithSize(typ, repeat.avg())
|
||||
for repeat.more(t.s) {
|
||||
k := reflect.ValueOf(keyGen.value(t))
|
||||
v := reflect.ValueOf(valGen.value(t))
|
||||
if m.MapIndex(k).IsValid() {
|
||||
repeat.reject()
|
||||
} else {
|
||||
m.SetMapIndex(k, v)
|
||||
}
|
||||
}
|
||||
return m.Interface()
|
||||
})
|
||||
}
|
||||
|
||||
func genAnyStruct(typ reflect.Type) *Generator[any] {
|
||||
numFields := typ.NumField()
|
||||
fieldGens := make([]*Generator[any], numFields)
|
||||
for i := 0; i < numFields; i++ {
|
||||
fieldGens[i] = newMakeGen(typ.Field(i).Type)
|
||||
}
|
||||
|
||||
return Custom[any](func(t *T) any {
|
||||
s := reflect.Indirect(reflect.New(typ))
|
||||
if numFields == 0 {
|
||||
t.s.drawBits(0)
|
||||
} else {
|
||||
for i := 0; i < numFields; i++ {
|
||||
f := reflect.ValueOf(fieldGens[i].value(t))
|
||||
s.Field(i).Set(f)
|
||||
}
|
||||
}
|
||||
return s.Interface()
|
||||
})
|
||||
}
|
||||
152
vendor/pgregory.net/rapid/persist.go
generated
vendored
Normal file
@@ -0,0 +1,152 @@
|
||||
// Copyright 2020 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
const (
|
||||
rapidVersion = "v0.4.8"
|
||||
|
||||
persistDirMode = 0775
|
||||
failfileTmpPattern = ".rapid-failfile-tmp-*"
|
||||
)
|
||||
|
||||
var (
|
||||
// https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
windowsReservedNames = []string{
|
||||
"CON", "PRN", "AUX", "NUL",
|
||||
"COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "COM¹", "COM²", "COM³",
|
||||
"LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9", "LPT¹", "LPT²", "LPT³",
|
||||
}
|
||||
)
|
||||
|
||||
func kindaSafeFilename(f string) string {
|
||||
var s strings.Builder
|
||||
for _, r := range f {
|
||||
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '-' || r == '_' {
|
||||
s.WriteRune(r)
|
||||
} else {
|
||||
s.WriteRune('_')
|
||||
}
|
||||
}
|
||||
name := s.String()
|
||||
nameUpper := strings.ToUpper(name)
|
||||
for _, reserved := range windowsReservedNames {
|
||||
if nameUpper == reserved {
|
||||
return name + "_"
|
||||
}
|
||||
}
|
||||
return name
|
||||
}
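
For example, kindaSafeFilename("TestFoo/case 1") yields "TestFoo_case_1", and the Windows-reserved name "CON" is returned as "CON_".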
|
||||
|
||||
func failFileName(testName string) (string, string) {
|
||||
ts := time.Now().Format("20060102150405")
|
||||
fileName := fmt.Sprintf("%s-%s-%d.fail", kindaSafeFilename(testName), ts, os.Getpid())
|
||||
dirName := filepath.Join("testdata", "rapid", kindaSafeFilename(testName))
|
||||
return dirName, filepath.Join(dirName, fileName)
|
||||
}
|
||||
|
||||
func failFilePattern(testName string) string {
|
||||
fileName := fmt.Sprintf("%s-*.fail", kindaSafeFilename(testName))
|
||||
dirName := filepath.Join("testdata", "rapid", kindaSafeFilename(testName))
|
||||
return filepath.Join(dirName, fileName)
|
||||
}
|
||||
|
||||
func saveFailFile(filename string, version string, output []byte, seed uint64, buf []uint64) error {
|
||||
dir := filepath.Dir(filename)
|
||||
err := os.MkdirAll(dir, persistDirMode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create directory for fail file %q: %w", filename, err)
|
||||
}
|
||||
|
||||
f, err := os.CreateTemp(dir, failfileTmpPattern)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temporary file for fail file %q: %w", filename, err)
|
||||
}
|
||||
defer func() { _ = os.Remove(f.Name()) }()
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
out := strings.Split(string(output), "\n")
|
||||
for _, s := range out {
|
||||
_, err := f.WriteString("# " + s + "\n")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write data to fail file %q: %w", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
bs := []string{fmt.Sprintf("%v#%v", version, seed)}
|
||||
for _, u := range buf {
|
||||
bs = append(bs, fmt.Sprintf("0x%x", u))
|
||||
}
|
||||
|
||||
_, err = f.WriteString(strings.Join(bs, "\n"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write data to fail file %q: %w", filename, err)
|
||||
}
|
||||
|
||||
_ = f.Close() // early close, otherwise os.Rename will fail on Windows
|
||||
err = os.Rename(f.Name(), filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save fail file %q: %w", filename, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadFailFile(filename string) (string, uint64, []uint64, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return "", 0, nil, fmt.Errorf("failed to open fail file: %w", err)
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
var data []string
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
s := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(s, "#") || s == "" {
|
||||
continue
|
||||
}
|
||||
data = append(data, s)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return "", 0, nil, fmt.Errorf("failed to load fail file %q: %w", filename, err)
|
||||
}
|
||||
|
||||
if len(data) == 0 {
|
||||
return "", 0, nil, fmt.Errorf("no data in fail file %q", filename)
|
||||
}
|
||||
|
||||
split := strings.Split(data[0], "#")
|
||||
if len(split) != 2 {
|
||||
return "", 0, nil, fmt.Errorf("invalid version/seed field %q in %q", data[0], filename)
|
||||
}
|
||||
seed, err := strconv.ParseUint(split[1], 10, 64)
|
||||
if err != nil {
|
||||
return "", 0, nil, fmt.Errorf("invalid seed %q in %q", split[1], filename)
|
||||
}
|
||||
|
||||
var buf []uint64
|
||||
for _, b := range data[1:] {
|
||||
u, err := strconv.ParseUint(b, 0, 64)
|
||||
if err != nil {
|
||||
return "", 0, nil, fmt.Errorf("failed to load fail file %q: %w", filename, err)
|
||||
}
|
||||
buf = append(buf, u)
|
||||
}
|
||||
|
||||
return split[0], seed, buf, nil
|
||||
}
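
Putting saveFailFile and loadFailFile together: a fail file is the captured failure output commented out with "# ", followed by a version#seed line and the recorded bit-stream words in hex. An illustrative (hypothetical) testdata/rapid/TestFoo/TestFoo-20240102150405-12345.fail might read:

# [rapid] draw port: 80
# example_test.go:12: port 80 outside requested range
v0.4.8#1234567890123456789
0x2
0x50
0xffffffffffffffff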
|
||||
408
vendor/pgregory.net/rapid/shrink.go
generated
vendored
Normal file
@@ -0,0 +1,408 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/bits"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
labelLowerFloatExp = "lower_float_exp"
|
||||
labelLowerFloatSignif = "lower_float_signif"
|
||||
labelLowerFloatFrac = "lower_float_frac"
|
||||
labelMinBlockBinSearch = "minblock_binsearch"
|
||||
labelMinBlockShift = "minblock_shift"
|
||||
labelMinBlockSort = "minblock_sort"
|
||||
labelMinBlockTrySmall = "minblock_trysmall"
|
||||
labelMinBlockUnset = "minblock_unset"
|
||||
labelRemoveGroup = "remove_group"
|
||||
labelRemoveGroupAndLower = "remove_group_lower"
|
||||
labelRemoveGroupSpan = "remove_groupspan"
|
||||
labelSortGroups = "sort_groups"
|
||||
)
|
||||
|
||||
func shrink(tb tb, deadline time.Time, rec recordedBits, err *testError, prop func(*T)) ([]uint64, *testError) {
|
||||
rec.prune()
|
||||
|
||||
s := &shrinker{
|
||||
tb: tb,
|
||||
rec: rec,
|
||||
err: err,
|
||||
prop: prop,
|
||||
visBits: []recordedBits{rec},
|
||||
tries: map[string]int{},
|
||||
cache: map[string]struct{}{},
|
||||
}
|
||||
|
||||
buf, err := s.shrink(deadline)
|
||||
|
||||
if flags.debugvis {
|
||||
name := fmt.Sprintf("vis-%v.html", strings.Replace(tb.Name(), "/", "_", -1))
|
||||
f, err := os.Create(name)
|
||||
if err != nil {
|
||||
tb.Logf("failed to create debugvis file %v: %v", name, err)
|
||||
} else {
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
if err = visWriteHTML(f, tb.Name(), s.visBits); err != nil {
|
||||
tb.Logf("failed to write debugvis file %v: %v", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf, err
|
||||
}
|
||||
|
||||
type shrinker struct {
|
||||
tb tb
|
||||
rec recordedBits
|
||||
err *testError
|
||||
prop func(*T)
|
||||
visBits []recordedBits
|
||||
tries map[string]int
|
||||
shrinks int
|
||||
cache map[string]struct{}
|
||||
hits int
|
||||
}
|
||||
|
||||
func (s *shrinker) debugf(verbose_ bool, format string, args ...any) {
|
||||
if flags.debug && (!verbose_ || flags.verbose) {
|
||||
s.tb.Helper()
|
||||
s.tb.Logf("[shrink] "+format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) shrink(deadline time.Time) (buf []uint64, err *testError) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
buf, err = s.rec.data, r.(*testError)
|
||||
}
|
||||
}()
|
||||
|
||||
i := 0
|
||||
for shrinks := -1; s.shrinks > shrinks && time.Now().Before(deadline); i++ {
|
||||
shrinks = s.shrinks
|
||||
|
||||
s.debugf(false, "round %v start", i)
|
||||
s.removeGroups(deadline)
|
||||
s.minimizeBlocks(deadline)
|
||||
|
||||
if s.shrinks == shrinks {
|
||||
s.debugf(false, "trying expensive algorithms for round %v", i)
|
||||
s.lowerFloatHack(deadline)
|
||||
s.removeGroupsAndLower(deadline)
|
||||
s.sortGroups(deadline)
|
||||
s.removeGroupSpans(deadline)
|
||||
}
|
||||
}
|
||||
|
||||
tries := 0
|
||||
for _, n := range s.tries {
|
||||
tries += n
|
||||
}
|
||||
s.debugf(false, "done, %v rounds total (%v tries, %v shrinks, %v cache hits):\n%v", i, tries, s.shrinks, s.hits, s.tries)
|
||||
|
||||
return s.rec.data, s.err
|
||||
}
|
||||
|
||||
func (s *shrinker) removeGroups(deadline time.Time) {
|
||||
for i := 0; i < len(s.rec.groups) && time.Now().Before(deadline); i++ {
|
||||
g := s.rec.groups[i]
|
||||
if !g.standalone || g.end < 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if s.accept(without(s.rec.data, g), labelRemoveGroup, "remove group %q at %v: [%v, %v)", g.label, i, g.begin, g.end) {
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) minimizeBlocks(deadline time.Time) {
|
||||
for i := 0; i < len(s.rec.data) && time.Now().Before(deadline); i++ {
|
||||
minimize(s.rec.data[i], func(u uint64, label string) bool {
|
||||
buf := append([]uint64(nil), s.rec.data...)
|
||||
buf[i] = u
|
||||
return s.accept(buf, label, "minimize block %v: %v to %v", i, s.rec.data[i], u)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) lowerFloatHack(deadline time.Time) {
|
||||
for i := 0; i < len(s.rec.groups) && time.Now().Before(deadline); i++ {
|
||||
g := s.rec.groups[i]
|
||||
if !g.standalone || g.end != g.begin+7 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := append([]uint64(nil), s.rec.data...)
|
||||
buf[g.begin+3] -= 1
|
||||
buf[g.begin+4] = math.MaxUint64
|
||||
buf[g.begin+5] = math.MaxUint64
|
||||
buf[g.begin+6] = math.MaxUint64
|
||||
|
||||
if !s.accept(buf, labelLowerFloatExp, "lower float exponent of group %q at %v to %v", g.label, i, buf[g.begin+3]) {
|
||||
buf := append([]uint64(nil), s.rec.data...)
|
||||
buf[g.begin+4] -= 1
|
||||
buf[g.begin+5] = math.MaxUint64
|
||||
buf[g.begin+6] = math.MaxUint64
|
||||
|
||||
if !s.accept(buf, labelLowerFloatSignif, "lower float significant of group %q at %v to %v", g.label, i, buf[g.begin+4]) {
|
||||
buf := append([]uint64(nil), s.rec.data...)
|
||||
buf[g.begin+5] -= 1
|
||||
buf[g.begin+6] = math.MaxUint64
|
||||
|
||||
s.accept(buf, labelLowerFloatFrac, "lower float frac of group %q at %v to %v", g.label, i, buf[g.begin+5])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) removeGroupsAndLower(deadline time.Time) {
|
||||
for i := 0; i < len(s.rec.data) && time.Now().Before(deadline); i++ {
|
||||
if s.rec.data[i] == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := append([]uint64(nil), s.rec.data...)
|
||||
buf[i] -= 1
|
||||
|
||||
for j := 0; j < len(s.rec.groups); j++ {
|
||||
g := s.rec.groups[j]
|
||||
if !g.standalone || g.end < 0 || (i >= g.begin && i < g.end) {
|
||||
continue
|
||||
}
|
||||
|
||||
if s.accept(without(buf, g), labelRemoveGroupAndLower, "lower block %v to %v and remove group %q at %v: [%v, %v)", i, buf[i], g.label, j, g.begin, g.end) {
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) sortGroups(deadline time.Time) {
|
||||
for i := 1; i < len(s.rec.groups) && time.Now().Before(deadline); i++ {
|
||||
for j := i; j > 0; {
|
||||
g := s.rec.groups[j]
|
||||
if !g.standalone || g.end < 0 {
|
||||
break
|
||||
}
|
||||
|
||||
j_ := j
|
||||
for j--; j >= 0; j-- {
|
||||
h := s.rec.groups[j]
|
||||
if !h.standalone || h.end < 0 || h.end > g.begin || h.label != g.label {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := append([]uint64(nil), s.rec.data[:h.begin]...)
|
||||
buf = append(buf, s.rec.data[g.begin:g.end]...)
|
||||
buf = append(buf, s.rec.data[h.end:g.begin]...)
|
||||
buf = append(buf, s.rec.data[h.begin:h.end]...)
|
||||
buf = append(buf, s.rec.data[g.end:]...)
|
||||
|
||||
if s.accept(buf, labelSortGroups, "swap groups %q at %v: [%v, %v) and %q at %v: [%v, %v)", g.label, j_, g.begin, g.end, h.label, j, h.begin, h.end) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) removeGroupSpans(deadline time.Time) {
|
||||
for i := 0; i < len(s.rec.groups) && time.Now().Before(deadline); i++ {
|
||||
g := s.rec.groups[i]
|
||||
if !g.standalone || g.end < 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
groups := []groupInfo{g}
|
||||
for j := i + 1; j < len(s.rec.groups); j++ {
|
||||
h := s.rec.groups[j]
|
||||
if !h.standalone || h.end < 0 || h.begin < groups[len(groups)-1].end {
|
||||
continue
|
||||
}
|
||||
|
||||
groups = append(groups, h)
|
||||
buf := without(s.rec.data, groups...)
|
||||
|
||||
if s.accept(buf, labelRemoveGroupSpan, "remove %v groups %v", len(groups), groups) {
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *shrinker) accept(buf []uint64, label string, format string, args ...any) bool {
|
||||
if compareData(buf, s.rec.data) >= 0 {
|
||||
return false
|
||||
}
|
||||
bufStr := dataStr(buf)
|
||||
if _, ok := s.cache[bufStr]; ok {
|
||||
s.hits++
|
||||
return false
|
||||
}
|
||||
|
||||
s.debugf(true, label+": trying to reproduce the failure with a smaller test case: "+format, args...)
|
||||
s.tries[label]++
|
||||
s1 := newBufBitStream(buf, false)
|
||||
err1 := checkOnce(newT(s.tb, s1, flags.debug && flags.verbose, nil), s.prop)
|
||||
if traceback(err1) != traceback(s.err) {
|
||||
s.cache[bufStr] = struct{}{}
|
||||
return false
|
||||
}
|
||||
|
||||
s.debugf(true, label+": trying to reproduce the failure")
|
||||
s.tries[label]++
|
||||
s.err = err1
|
||||
s2 := newBufBitStream(buf, true)
|
||||
err2 := checkOnce(newT(s.tb, s2, flags.debug && flags.verbose, nil), s.prop)
|
||||
s.rec = s2.recordedBits
|
||||
s.rec.prune()
|
||||
assert(compareData(s.rec.data, buf) <= 0)
|
||||
if flags.debugvis {
|
||||
s.visBits = append(s.visBits, s.rec)
|
||||
}
|
||||
if !sameError(err1, err2) {
|
||||
panic(err2)
|
||||
}
|
||||
|
||||
s.debugf(false, label+" success: "+format, args...)
|
||||
s.shrinks++
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func minimize(u uint64, cond func(uint64, string) bool) uint64 {
|
||||
if u == 0 {
|
||||
return 0
|
||||
}
|
||||
for i := uint64(0); i < u && i < small; i++ {
|
||||
if cond(i, labelMinBlockTrySmall) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
if u <= small {
|
||||
return u
|
||||
}
|
||||
|
||||
m := &minimizer{best: u, cond: cond}
|
||||
|
||||
m.rShift()
|
||||
m.unsetBits()
|
||||
m.sortBits()
|
||||
m.binSearch()
|
||||
|
||||
return m.best
|
||||
}
|
||||
|
||||
type minimizer struct {
|
||||
best uint64
|
||||
cond func(uint64, string) bool
|
||||
}
|
||||
|
||||
func (m *minimizer) accept(u uint64, label string) bool {
|
||||
if u >= m.best || u < small || !m.cond(u, label) {
|
||||
return false
|
||||
}
|
||||
m.best = u
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *minimizer) rShift() {
|
||||
for m.accept(m.best>>1, labelMinBlockShift) {
|
||||
}
|
||||
}
|
||||
|
||||
func (m *minimizer) unsetBits() {
|
||||
size := bits.Len64(m.best)
|
||||
|
||||
for i := size - 1; i >= 0; i-- {
|
||||
m.accept(m.best^1<<uint(i), labelMinBlockUnset)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *minimizer) sortBits() {
|
||||
size := bits.Len64(m.best)
|
||||
|
||||
for i := size - 1; i >= 0; i-- {
|
||||
h := uint64(1 << uint(i))
|
||||
if m.best&h != 0 {
|
||||
for j := 0; j < i; j++ {
|
||||
l := uint64(1 << uint(j))
|
||||
if m.best&l == 0 {
|
||||
if m.accept(m.best^(l|h), labelMinBlockSort) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *minimizer) binSearch() {
|
||||
if !m.accept(m.best-1, labelMinBlockBinSearch) {
|
||||
return
|
||||
}
|
||||
|
||||
i := uint64(0)
|
||||
j := m.best
|
||||
for i < j {
|
||||
h := i + (j-i)/2
|
||||
if m.accept(h, labelMinBlockBinSearch) {
|
||||
j = h
|
||||
} else {
|
||||
i = h + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func without(data []uint64, groups ...groupInfo) []uint64 {
|
||||
buf := append([]uint64(nil), data...)
|
||||
|
||||
for i := len(groups) - 1; i >= 0; i-- {
|
||||
g := groups[i]
|
||||
buf = append(buf[:g.begin], buf[g.end:]...)
|
||||
}
|
||||
|
||||
return buf
|
||||
}
|
||||
|
||||
func dataStr(data []uint64) string {
|
||||
b := &strings.Builder{}
|
||||
err := binary.Write(b, binary.LittleEndian, data)
|
||||
assert(err == nil)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func compareData(a []uint64, b []uint64) int {
|
||||
if len(a) < len(b) {
|
||||
return -1
|
||||
}
|
||||
if len(a) > len(b) {
|
||||
return 1
|
||||
}
|
||||
|
||||
for i := range a {
|
||||
if a[i] < b[i] {
|
||||
return -1
|
||||
}
|
||||
if a[i] > b[i] {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
157
vendor/pgregory.net/rapid/statemachine.go
generated
vendored
Normal file
@@ -0,0 +1,157 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
actionLabel = "action"
|
||||
validActionTries = 100 // hack, but probably good enough for now
|
||||
checkMethodName = "Check"
|
||||
noValidActionsMsg = "can't find a valid (non-skipped) action"
|
||||
)
|
||||
|
||||
// Repeat executes a random sequence of actions (often called a "state machine" test).
|
||||
// actions[""], if set, is executed before/after every other action invocation
|
||||
// and should only contain invariant checking code.
|
||||
//
|
||||
// For complex state machines, it can be more convenient to specify actions as
|
||||
// methods of a special state machine type. In this case, [StateMachineActions]
|
||||
// can be used to create an actions map from state machine methods using reflection.
|
||||
func (t *T) Repeat(actions map[string]func(*T)) {
|
||||
t.Helper()
|
||||
|
||||
check := func(*T) {}
|
||||
actionKeys := make([]string, 0, len(actions))
|
||||
for key, action := range actions {
|
||||
if key != "" {
|
||||
actionKeys = append(actionKeys, key)
|
||||
} else {
|
||||
check = action
|
||||
}
|
||||
}
|
||||
if len(actionKeys) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Strings(actionKeys)
|
||||
|
||||
steps := flags.steps
|
||||
if testing.Short() {
|
||||
steps /= 2
|
||||
}
|
||||
|
||||
repeat := newRepeat(-1, -1, float64(steps), "Repeat")
|
||||
sm := stateMachine{
|
||||
check: check,
|
||||
actionKeys: SampledFrom(actionKeys),
|
||||
actions: actions,
|
||||
}
|
||||
|
||||
sm.check(t)
|
||||
t.failOnError()
|
||||
for repeat.more(t.s) {
|
||||
ok := sm.executeAction(t)
|
||||
if ok {
|
||||
sm.check(t)
|
||||
t.failOnError()
|
||||
} else {
|
||||
repeat.reject()
|
||||
}
|
||||
}
|
||||
}
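
A minimal sketch of Repeat with an explicit actions map (not part of the vendored file), assuming the public API above; the counter model and names are illustrative:

package example

import (
	"testing"

	"pgregory.net/rapid"
)

func TestCounterRepeat(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		n := 0
		t.Repeat(map[string]func(*rapid.T){
			"incr": func(t *rapid.T) { n++ },
			"decr": func(t *rapid.T) {
				if n > 0 {
					n--
				}
			},
			// The "" entry runs around every action and checks invariants only.
			"": func(t *rapid.T) {
				if n < 0 {
					t.Fatalf("counter went negative: %d", n)
				}
			},
		})
	})
}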
|
||||
|
||||
type StateMachine interface {
|
||||
// Check is run after every action and should contain invariant checks.
|
||||
//
|
||||
// All other public methods should have a form ActionName(t *rapid.T)
|
||||
// or ActionName(t rapid.TB) and are used as possible actions.
|
||||
// At least one action has to be specified.
|
||||
Check(*T)
|
||||
}
|
||||
|
||||
// StateMachineActions creates an actions map for [*T.Repeat]
|
||||
// from methods of a [StateMachine] type instance using reflection.
|
||||
func StateMachineActions(sm StateMachine) map[string]func(*T) {
|
||||
var (
|
||||
v = reflect.ValueOf(sm)
|
||||
t = v.Type()
|
||||
n = t.NumMethod()
|
||||
)
|
||||
|
||||
actions := make(map[string]func(*T), n)
|
||||
for i := 0; i < n; i++ {
|
||||
name := t.Method(i).Name
|
||||
|
||||
if name == checkMethodName {
|
||||
continue
|
||||
}
|
||||
|
||||
m, ok := v.Method(i).Interface().(func(*T))
|
||||
if ok {
|
||||
actions[name] = m
|
||||
}
|
||||
|
||||
m2, ok := v.Method(i).Interface().(func(TB))
|
||||
if ok {
|
||||
actions[name] = func(t *T) {
|
||||
m2(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assertf(len(actions) > 0, "state machine of type %v has no actions specified", t)
|
||||
actions[""] = sm.Check
|
||||
|
||||
return actions
|
||||
}
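
A minimal state-machine sketch to go with StateMachineActions (not part of the vendored file), assuming the StateMachine interface above; the stack model is illustrative:

package example

import (
	"testing"

	"pgregory.net/rapid"
)

// stackMachine keeps a real stack plus a trivial model: the expected length.
type stackMachine struct {
	elems []int
	n     int
}

func (m *stackMachine) Push(t *rapid.T) {
	m.elems = append(m.elems, rapid.Int().Draw(t, "v"))
	m.n++
}

func (m *stackMachine) Pop(t *rapid.T) {
	if len(m.elems) == 0 {
		return // nothing to pop; a no-op action
	}
	m.elems = m.elems[:len(m.elems)-1]
	m.n--
}

// Check satisfies rapid.StateMachine; it runs around every action.
func (m *stackMachine) Check(t *rapid.T) {
	if len(m.elems) != m.n {
		t.Fatalf("have %d elements, model expects %d", len(m.elems), m.n)
	}
}

func TestStackMachine(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		t.Repeat(rapid.StateMachineActions(&stackMachine{}))
	})
}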
|
||||
|
||||
type stateMachine struct {
|
||||
check func(*T)
|
||||
actionKeys *Generator[string]
|
||||
actions map[string]func(*T)
|
||||
}
|
||||
|
||||
func (sm *stateMachine) executeAction(t *T) bool {
|
||||
t.Helper()
|
||||
|
||||
for n := 0; n < validActionTries; n++ {
|
||||
i := t.s.beginGroup(actionLabel, false)
|
||||
action := sm.actions[sm.actionKeys.Draw(t, "action")]
|
||||
invalid, skipped := runAction(t, action)
|
||||
t.s.endGroup(i, false)
|
||||
|
||||
if skipped {
|
||||
continue
|
||||
} else {
|
||||
return !invalid
|
||||
}
|
||||
}
|
||||
|
||||
panic(stopTest(noValidActionsMsg))
|
||||
}
|
||||
|
||||
func runAction(t *T, action func(*T)) (invalid bool, skipped bool) {
|
||||
defer func(draws int) {
|
||||
if r := recover(); r != nil {
|
||||
if _, ok := r.(invalidData); ok {
|
||||
invalid = true
|
||||
skipped = t.draws == draws
|
||||
} else {
|
||||
panic(r)
|
||||
}
|
||||
}
|
||||
}(t.draws)
|
||||
|
||||
action(t)
|
||||
t.failOnError()
|
||||
|
||||
return false, false
|
||||
}
|
||||
459
vendor/pgregory.net/rapid/strings.go
generated
vendored
Normal file
@@ -0,0 +1,459 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"regexp/syntax"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultRunes = []rune{
|
||||
'A', 'a', '?',
|
||||
'~', '!', '@', '#', '$', '%', '^', '&', '*', '_', '-', '+', '=',
|
||||
'.', ',', ':', ';',
|
||||
' ', '\t', '\r', '\n',
|
||||
'/', '\\', '|',
|
||||
'(', '[', '{', '<',
|
||||
'\'', '"', '`',
|
||||
'\x00', '\x0B', '\x1B', '\x7F', // NUL, VT, ESC, DEL
|
||||
'\uFEFF', '\uFFFD', '\u202E', // BOM, replacement character, RTL override
|
||||
'Ⱥ', // In UTF-8, Ⱥ increases in length from 2 to 3 bytes when lowercased
|
||||
}
|
||||
|
||||
// unicode.Categories without surrogates (which are not allowed in UTF-8), ordered by taste
|
||||
defaultTables = []*unicode.RangeTable{
|
||||
unicode.Lu, // Letter, uppercase (1781)
|
||||
unicode.Ll, // Letter, lowercase (2145)
|
||||
unicode.Lt, // Letter, titlecase (31)
|
||||
unicode.Lm, // Letter, modifier (250)
|
||||
unicode.Lo, // Letter, other (121212)
|
||||
unicode.Nd, // Number, decimal digit (610)
|
||||
unicode.Nl, // Number, letter (236)
|
||||
unicode.No, // Number, other (807)
|
||||
unicode.P, // Punctuation (788)
|
||||
unicode.Sm, // Symbol, math (948)
|
||||
unicode.Sc, // Symbol, currency (57)
|
||||
unicode.Sk, // Symbol, modifier (121)
|
||||
unicode.So, // Symbol, other (5984)
|
||||
unicode.Mn, // Mark, nonspacing (1805)
|
||||
unicode.Me, // Mark, enclosing (13)
|
||||
unicode.Mc, // Mark, spacing combining (415)
|
||||
unicode.Z, // Separator (19)
|
||||
unicode.Cc, // Other, control (65)
|
||||
unicode.Cf, // Other, format (152)
|
||||
unicode.Co, // Other, private use (137468)
|
||||
}
|
||||
|
||||
expandedTables = sync.Map{} // *unicode.RangeTable / regexp name -> []rune
|
||||
compiledRegexps = sync.Map{} // regexp -> compiledRegexp
|
||||
regexpNames = sync.Map{} // *regexp.Regexp -> string
|
||||
charClassGens = sync.Map{} // regexp name -> *Generator
|
||||
|
||||
anyRuneGen = Rune()
|
||||
anyRuneGenNoNL = Rune().Filter(func(r rune) bool { return r != '\n' })
|
||||
)
|
||||
|
||||
type compiledRegexp struct {
|
||||
syn *syntax.Regexp
|
||||
re *regexp.Regexp
|
||||
}
|
||||
|
||||
// Rune creates a rune generator. Rune is equivalent to [RuneFrom] with default set of runes and tables.
|
||||
func Rune() *Generator[rune] {
|
||||
return runesFrom(true, defaultRunes, defaultTables...)
|
||||
}
|
||||
|
||||
// RuneFrom creates a rune generator from provided runes and tables.
|
||||
// RuneFrom panics if both runes and tables are empty. RuneFrom panics if tables contain an empty table.
|
||||
func RuneFrom(runes []rune, tables ...*unicode.RangeTable) *Generator[rune] {
|
||||
return runesFrom(false, runes, tables...)
|
||||
}
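
A brief sketch of RuneFrom (not part of the vendored file), assuming the signature above; the rune set and table choice are illustrative:

package example

import (
	"testing"
	"unicode"

	"pgregory.net/rapid"
)

func TestRuneFromUsage(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		hex := rapid.RuneFrom([]rune("0123456789abcdef")).Draw(t, "hex")
		greek := rapid.RuneFrom(nil, unicode.Greek).Draw(t, "greek")
		t.Logf("hex=%q greek=%q", hex, greek)
	})
}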
|
||||
|
||||
func runesFrom(default_ bool, runes []rune, tables ...*unicode.RangeTable) *Generator[rune] {
|
||||
if len(tables) == 0 {
|
||||
assertf(len(runes) > 0, "at least one rune should be specified")
|
||||
}
|
||||
if len(runes) == 0 {
|
||||
assertf(len(tables) > 0, "at least one *unicode.RangeTable should be specified")
|
||||
}
|
||||
|
||||
var weights []int
|
||||
if len(runes) > 0 {
|
||||
weights = append(weights, len(tables))
|
||||
}
|
||||
for range tables {
|
||||
weights = append(weights, 1)
|
||||
}
|
||||
|
||||
tables_ := make([][]rune, len(tables))
|
||||
for i := range tables {
|
||||
tables_[i] = expandRangeTable(tables[i], tables[i])
|
||||
assertf(len(tables_[i]) > 0, "empty *unicode.RangeTable %v", i)
|
||||
}
|
||||
|
||||
return newGenerator[rune](&runeGen{
|
||||
die: newLoadedDie(weights),
|
||||
runes: runes,
|
||||
tables: tables_,
|
||||
default_: default_,
|
||||
})
|
||||
}
|
||||
|
||||
type runeGen struct {
|
||||
die *loadedDie
|
||||
runes []rune
|
||||
tables [][]rune
|
||||
default_ bool
|
||||
}
|
||||
|
||||
func (g *runeGen) String() string {
|
||||
if g.default_ {
|
||||
return "Rune()"
|
||||
} else {
|
||||
return fmt.Sprintf("Rune(%v runes, %v tables)", len(g.runes), len(g.tables))
|
||||
}
|
||||
}
|
||||
|
||||
func (g *runeGen) value(t *T) rune {
|
||||
n := g.die.roll(t.s)
|
||||
|
||||
runes := g.runes
|
||||
if len(g.runes) == 0 {
|
||||
runes = g.tables[n]
|
||||
} else if n > 0 {
|
||||
runes = g.tables[n-1]
|
||||
}
|
||||
|
||||
return runes[genIndex(t.s, len(runes), true)]
|
||||
}
|
||||
|
||||
// String is a shorthand for [StringOf]([Rune]()).
|
||||
func String() *Generator[string] {
|
||||
return StringOf(anyRuneGen)
|
||||
}
|
||||
|
||||
// StringN is a shorthand for [StringOfN]([Rune](), minRunes, maxRunes, maxLen).
|
||||
func StringN(minRunes int, maxRunes int, maxLen int) *Generator[string] {
|
||||
return StringOfN(anyRuneGen, minRunes, maxRunes, maxLen)
|
||||
}
|
||||
|
||||
// StringOf is a shorthand for [StringOfN](elem, -1, -1, -1).
|
||||
func StringOf(elem *Generator[rune]) *Generator[string] {
|
||||
return StringOfN(elem, -1, -1, -1)
|
||||
}
|
||||
|
||||
// StringOfN creates a UTF-8 string generator.
// If minRunes >= 0, generated strings have at least minRunes runes.
// If maxRunes >= 0, generated strings have at most maxRunes runes.
// If maxLen >= 0, generated strings are at most maxLen bytes long.
// StringOfN panics if maxRunes >= 0 and minRunes > maxRunes.
// StringOfN panics if maxLen >= 0 and maxLen < maxRunes.
|
||||
func StringOfN(elem *Generator[rune], minRunes int, maxRunes int, maxLen int) *Generator[string] {
|
||||
assertValidRange(minRunes, maxRunes)
|
||||
assertf(maxLen < 0 || maxLen >= maxRunes, "maximum length (%v) should not be less than maximum number of runes (%v)", maxLen, maxRunes)
|
||||
|
||||
return newGenerator[string](&stringGen{
|
||||
elem: elem,
|
||||
minRunes: minRunes,
|
||||
maxRunes: maxRunes,
|
||||
maxLen: maxLen,
|
||||
})
|
||||
}
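
A usage sketch for the string generators (not part of the vendored file), assuming the bounds semantics documented above; the limits are illustrative:

package example

import (
	"testing"
	"unicode/utf8"

	"pgregory.net/rapid"
)

func TestStringNUsage(t *testing.T) {
	rapid.Check(t, func(t *rapid.T) {
		name := rapid.StringN(1, 16, -1).Draw(t, "name") // 1..16 runes, no byte-length cap
		if n := utf8.RuneCountInString(name); n < 1 || n > 16 {
			t.Fatalf("got %d runes, want 1..16", n)
		}
	})
}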
|
||||
|
||||
type stringGen struct {
|
||||
elem *Generator[rune]
|
||||
minRunes int
|
||||
maxRunes int
|
||||
maxLen int
|
||||
}
|
||||
|
||||
func (g *stringGen) String() string {
|
||||
if g.elem == anyRuneGen {
|
||||
if g.minRunes < 0 && g.maxRunes < 0 && g.maxLen < 0 {
|
||||
return "String()"
|
||||
} else {
|
||||
return fmt.Sprintf("StringN(minRunes=%v, maxRunes=%v, maxLen=%v)", g.minRunes, g.maxRunes, g.maxLen)
|
||||
}
|
||||
} else {
|
||||
if g.minRunes < 0 && g.maxRunes < 0 && g.maxLen < 0 {
|
||||
return fmt.Sprintf("StringOf(%v)", g.elem)
|
||||
} else {
|
||||
return fmt.Sprintf("StringOfN(%v, minRunes=%v, maxRunes=%v, maxLen=%v)", g.elem, g.minRunes, g.maxRunes, g.maxLen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *stringGen) value(t *T) string {
|
||||
repeat := newRepeat(g.minRunes, g.maxRunes, -1, g.elem.String())
|
||||
|
||||
var b strings.Builder
|
||||
b.Grow(repeat.avg())
|
||||
|
||||
maxLen := g.maxLen
|
||||
if maxLen < 0 {
|
||||
maxLen = math.MaxInt
|
||||
}
|
||||
|
||||
for repeat.more(t.s) {
|
||||
r := g.elem.value(t)
|
||||
n := utf8.RuneLen(r)
|
||||
|
||||
if n < 0 || b.Len()+n > maxLen {
|
||||
repeat.reject()
|
||||
} else {
|
||||
b.WriteRune(r)
|
||||
}
|
||||
}
|
||||
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// StringMatching creates a UTF-8 string generator matching the provided [syntax.Perl] regular expression.
|
||||
func StringMatching(expr string) *Generator[string] {
|
||||
compiled, err := compileRegexp(expr)
|
||||
assertf(err == nil, "%v", err)
|
||||
|
||||
return newGenerator[string](®expStringGen{
|
||||
regexpGen{
|
||||
expr: expr,
|
||||
syn: compiled.syn,
|
||||
re: compiled.re,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// SliceOfBytesMatching creates a UTF-8 byte slice generator matching the provided [syntax.Perl] regular expression.
|
||||
func SliceOfBytesMatching(expr string) *Generator[[]byte] {
|
||||
compiled, err := compileRegexp(expr)
|
||||
assertf(err == nil, "%v", err)
|
||||
|
||||
return newGenerator[[]byte](®expSliceGen{
|
||||
regexpGen{
|
||||
expr: expr,
|
||||
syn: compiled.syn,
|
||||
re: compiled.re,
|
||||
},
|
||||
})
|
||||
}
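
A sketch of the regexp-driven generators (not part of the vendored file), assuming the syntax.Perl dialect noted above; the pattern is illustrative:

package example

import (
	"regexp"
	"testing"

	"pgregory.net/rapid"
)

func TestStringMatchingUsage(t *testing.T) {
	re := regexp.MustCompile(`\d{1,3}(\.\d{1,3}){3}`)
	rapid.Check(t, func(t *rapid.T) {
		addr := rapid.StringMatching(`\d{1,3}(\.\d{1,3}){3}`).Draw(t, "addr")
		if !re.MatchString(addr) {
			t.Fatalf("generated %q does not match the source pattern", addr)
		}
	})
}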
|
||||
|
||||
type runeWriter interface {
|
||||
WriteRune(r rune) (int, error)
|
||||
}
|
||||
|
||||
type regexpGen struct {
|
||||
expr string
|
||||
syn *syntax.Regexp
|
||||
re *regexp.Regexp
|
||||
}
|
||||
type regexpStringGen struct{ regexpGen }
|
||||
type regexpSliceGen struct{ regexpGen }
|
||||
|
||||
func (g *regexpStringGen) String() string {
|
||||
return fmt.Sprintf("StringMatching(%q)", g.expr)
|
||||
}
|
||||
func (g *regexpSliceGen) String() string {
|
||||
return fmt.Sprintf("SliceOfBytesMatching(%q)", g.expr)
|
||||
}
|
||||
|
||||
func (g *regexpStringGen) maybeString(t *T) (string, bool) {
|
||||
b := &strings.Builder{}
|
||||
g.build(b, g.syn, t)
|
||||
v := b.String()
|
||||
|
||||
if g.re.MatchString(v) {
|
||||
return v, true
|
||||
} else {
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
func (g *regexpSliceGen) maybeSlice(t *T) ([]byte, bool) {
|
||||
b := &bytes.Buffer{}
|
||||
g.build(b, g.syn, t)
|
||||
v := b.Bytes()
|
||||
|
||||
if g.re.Match(v) {
|
||||
return v, true
|
||||
} else {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func (g *regexpStringGen) value(t *T) string {
|
||||
return find(g.maybeString, t, small)
|
||||
}
|
||||
func (g *regexpSliceGen) value(t *T) []byte {
|
||||
return find(g.maybeSlice, t, small)
|
||||
}
|
||||
|
||||
func (g *regexpGen) build(w runeWriter, re *syntax.Regexp, t *T) {
|
||||
i := t.s.beginGroup(re.Op.String(), false)
|
||||
|
||||
switch re.Op {
|
||||
case syntax.OpNoMatch:
|
||||
panic(invalidData("no possible regexp match"))
|
||||
case syntax.OpEmptyMatch:
|
||||
t.s.drawBits(0)
|
||||
case syntax.OpLiteral:
|
||||
t.s.drawBits(0)
|
||||
for _, r := range re.Rune {
|
||||
_, _ = w.WriteRune(maybeFoldCase(t.s, r, re.Flags))
|
||||
}
|
||||
case syntax.OpCharClass, syntax.OpAnyCharNotNL, syntax.OpAnyChar:
|
||||
sub := anyRuneGen
|
||||
switch re.Op {
|
||||
case syntax.OpCharClass:
|
||||
sub = charClassGen(re)
|
||||
case syntax.OpAnyCharNotNL:
|
||||
sub = anyRuneGenNoNL
|
||||
}
|
||||
r := sub.value(t)
|
||||
_, _ = w.WriteRune(maybeFoldCase(t.s, r, re.Flags))
|
||||
case syntax.OpBeginLine, syntax.OpEndLine,
|
||||
syntax.OpBeginText, syntax.OpEndText,
|
||||
syntax.OpWordBoundary, syntax.OpNoWordBoundary:
|
||||
t.s.drawBits(0) // do nothing and hope that find() is enough
|
||||
case syntax.OpCapture:
|
||||
g.build(w, re.Sub[0], t)
|
||||
case syntax.OpStar, syntax.OpPlus, syntax.OpQuest, syntax.OpRepeat:
|
||||
min, max := re.Min, re.Max
|
||||
switch re.Op {
|
||||
case syntax.OpStar:
|
||||
min, max = 0, -1
|
||||
case syntax.OpPlus:
|
||||
min, max = 1, -1
|
||||
case syntax.OpQuest:
|
||||
min, max = 0, 1
|
||||
}
|
||||
repeat := newRepeat(min, max, -1, regexpName(re.Sub[0]))
|
||||
for repeat.more(t.s) {
|
||||
g.build(w, re.Sub[0], t)
|
||||
}
|
||||
case syntax.OpConcat:
|
||||
for _, sub := range re.Sub {
|
||||
g.build(w, sub, t)
|
||||
}
|
||||
case syntax.OpAlternate:
|
||||
ix := genIndex(t.s, len(re.Sub), true)
|
||||
g.build(w, re.Sub[ix], t)
|
||||
default:
|
||||
assertf(false, "invalid regexp op %v", re.Op)
|
||||
}
|
||||
|
||||
t.s.endGroup(i, false)
|
||||
}
|
||||
|
||||
func maybeFoldCase(s bitStream, r rune, flags syntax.Flags) rune {
|
||||
n := uint64(0)
|
||||
if flags&syntax.FoldCase != 0 {
|
||||
n, _, _ = genUintN(s, 4, false)
|
||||
}
|
||||
|
||||
for i := 0; i < int(n); i++ {
|
||||
r = unicode.SimpleFold(r)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func expandRangeTable(t *unicode.RangeTable, key any) []rune {
|
||||
cached, ok := expandedTables.Load(key)
|
||||
if ok {
|
||||
return cached.([]rune)
|
||||
}
|
||||
|
||||
n := 0
|
||||
for _, r := range t.R16 {
|
||||
n += int(r.Hi-r.Lo)/int(r.Stride) + 1
|
||||
}
|
||||
for _, r := range t.R32 {
|
||||
n += int(r.Hi-r.Lo)/int(r.Stride) + 1
|
||||
}
|
||||
|
||||
ret := make([]rune, 0, n)
|
||||
for _, r := range t.R16 {
|
||||
for i := uint32(r.Lo); i <= uint32(r.Hi); i += uint32(r.Stride) {
|
||||
ret = append(ret, rune(i))
|
||||
}
|
||||
}
|
||||
for _, r := range t.R32 {
|
||||
for i := uint64(r.Lo); i <= uint64(r.Hi); i += uint64(r.Stride) {
|
||||
ret = append(ret, rune(i))
|
||||
}
|
||||
}
|
||||
expandedTables.Store(key, ret)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func compileRegexp(expr string) (compiledRegexp, error) {
|
||||
cached, ok := compiledRegexps.Load(expr)
|
||||
if ok {
|
||||
return cached.(compiledRegexp), nil
|
||||
}
|
||||
|
||||
syn, err := syntax.Parse(expr, syntax.Perl)
|
||||
if err != nil {
|
||||
return compiledRegexp{}, fmt.Errorf("failed to parse regexp %q: %v", expr, err)
|
||||
}
|
||||
|
||||
re, err := regexp.Compile(expr)
|
||||
if err != nil {
|
||||
return compiledRegexp{}, fmt.Errorf("failed to compile regexp %q: %v", expr, err)
|
||||
}
|
||||
|
||||
ret := compiledRegexp{syn, re}
|
||||
compiledRegexps.Store(expr, ret)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func regexpName(re *syntax.Regexp) string {
|
||||
cached, ok := regexpNames.Load(re)
|
||||
if ok {
|
||||
return cached.(string)
|
||||
}
|
||||
|
||||
s := re.String()
|
||||
regexpNames.Store(re, s)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func charClassGen(re *syntax.Regexp) *Generator[rune] {
|
||||
cached, ok := charClassGens.Load(regexpName(re))
|
||||
if ok {
|
||||
return cached.(*Generator[rune])
|
||||
}
|
||||
|
||||
t := &unicode.RangeTable{R32: make([]unicode.Range32, 0, len(re.Rune)/2)}
|
||||
for i := 0; i < len(re.Rune); i += 2 {
|
||||
// not a valid unicode.Range32, since it requires that Lo and Hi must always be >= 1<<16
|
||||
// however, we don't really care, since the only use of these ranges is as input to expandRangeTable
|
||||
t.R32 = append(t.R32, unicode.Range32{
|
||||
Lo: uint32(re.Rune[i]),
|
||||
Hi: uint32(re.Rune[i+1]),
|
||||
Stride: 1,
|
||||
})
|
||||
}
|
||||
|
||||
g := newGenerator[rune](&runeGen{
|
||||
die: newLoadedDie([]int{1}),
|
||||
tables: [][]rune{expandRangeTable(t, regexpName(re))},
|
||||
})
|
||||
charClassGens.Store(regexpName(re), g)
|
||||
|
||||
return g
|
||||
}
|
||||
276
vendor/pgregory.net/rapid/utils.go
generated
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
|
||||
|
||||
package rapid
|
||||
|
||||
import (
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
const (
|
||||
biasLabel = "bias"
|
||||
intBitsLabel = "intbits"
|
||||
coinFlipLabel = "coinflip"
|
||||
dieRollLabel = "dieroll"
|
||||
repeatLabel = "@repeat"
|
||||
)
|
||||
|
||||
func bitmask64(n uint) uint64 {
|
||||
return uint64(1)<<n - 1
|
||||
}
|
||||
|
||||
func genFloat01(s bitStream) float64 {
|
||||
return float64(s.drawBits(53)) * 0x1.0p-53
|
||||
}
|
||||
|
||||
func genGeom(s bitStream, p float64) uint64 {
|
||||
assert(p > 0 && p <= 1)
|
||||
|
||||
f := genFloat01(s)
|
||||
n := math.Log1p(-f) / math.Log1p(-p)
|
||||
|
||||
return uint64(n)
|
||||
}
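
genGeom is inverse-transform sampling of a geometric distribution: for uniform f in [0, 1), floor(log1p(-f)/log1p(-p)) is geometrically distributed with parameter p. A standalone illustration of the same formula (editorial sketch using math/rand rather than the internal bit stream):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// geomSample mirrors genGeom's formula: small values are most likely,
// with mean roughly (1-p)/p.
func geomSample(r *rand.Rand, p float64) uint64 {
	f := r.Float64() // uniform in [0, 1)
	return uint64(math.Log1p(-f) / math.Log1p(-p))
}

func main() {
	r := rand.New(rand.NewSource(1))
	for i := 0; i < 5; i++ {
		fmt.Println(geomSample(r, 0.1))
	}
}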
|
||||
|
||||
func genUintNNoReject(s bitStream, max uint64) uint64 {
|
||||
bitlen := bits.Len64(max)
|
||||
i := s.beginGroup(intBitsLabel, false)
|
||||
u := s.drawBits(bitlen)
|
||||
s.endGroup(i, false)
|
||||
if u > max {
|
||||
u = max
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func genUintNUnbiased(s bitStream, max uint64) uint64 {
|
||||
bitlen := bits.Len64(max)
|
||||
|
||||
for {
|
||||
i := s.beginGroup(intBitsLabel, false)
|
||||
u := s.drawBits(bitlen)
|
||||
ok := u <= max
|
||||
s.endGroup(i, !ok)
|
||||
if ok {
|
||||
return u
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genUintNBiased(s bitStream, max uint64) (uint64, bool, bool) {
|
||||
bitlen := bits.Len64(max)
|
||||
i := s.beginGroup(biasLabel, false)
|
||||
m := math.Max(8, (float64(bitlen)+48)/7)
|
||||
n := genGeom(s, 1/(m+1)) + 1
|
||||
s.endGroup(i, false)
|
||||
|
||||
if int(n) < bitlen {
|
||||
bitlen = int(n)
|
||||
} else if int(n) >= 64-(16-int(m))*4 {
|
||||
bitlen = 65
|
||||
}
|
||||
|
||||
for {
|
||||
i := s.beginGroup(intBitsLabel, false)
|
||||
u := s.drawBits(bitlen)
|
||||
ok := bitlen > 64 || u <= max
|
||||
s.endGroup(i, !ok)
|
||||
if bitlen > 64 {
|
||||
u = max
|
||||
}
|
||||
if u <= max {
|
||||
return u, u == 0 && n == 1, u == max && bitlen >= int(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genUintN(s bitStream, max uint64, bias bool) (uint64, bool, bool) {
|
||||
if bias {
|
||||
return genUintNBiased(s, max)
|
||||
} else {
|
||||
return genUintNUnbiased(s, max), false, false
|
||||
}
|
||||
}
|
||||
|
||||
func genUintRange(s bitStream, min uint64, max uint64, bias bool) (uint64, bool, bool) {
|
||||
if min > max {
|
||||
assertf(false, "invalid range [%v, %v]", min, max) // avoid allocations in the fast path
|
||||
}
|
||||
|
||||
u, lOverflow, rOverflow := genUintN(s, max-min, bias)
|
||||
|
||||
return min + u, lOverflow, rOverflow
|
||||
}
|
||||
|
||||
func genIntRange(s bitStream, min int64, max int64, bias bool) (int64, bool, bool) {
|
||||
if min > max {
|
||||
assertf(false, "invalid range [%v, %v]", min, max) // avoid allocations in the fast path
|
||||
}
|
||||
|
||||
var posMin, negMin uint64
|
||||
var pNeg float64
|
||||
if min >= 0 {
|
||||
posMin = uint64(min)
|
||||
pNeg = 0
|
||||
} else if max <= 0 {
|
||||
negMin = uint64(-max)
|
||||
pNeg = 1
|
||||
} else {
|
||||
posMin = 0
|
||||
negMin = 1
|
||||
pos := uint64(max) + 1
|
||||
neg := uint64(-min)
|
||||
pNeg = float64(neg) / (float64(neg) + float64(pos))
|
||||
if bias {
|
||||
pNeg = 0.5
|
||||
}
|
||||
}
|
||||
|
||||
if flipBiasedCoin(s, pNeg) {
|
||||
u, lOverflow, rOverflow := genUintRange(s, negMin, uint64(-min), bias)
|
||||
return -int64(u), rOverflow, lOverflow && max <= 0
|
||||
} else {
|
||||
u, lOverflow, rOverflow := genUintRange(s, posMin, uint64(max), bias)
|
||||
return int64(u), lOverflow && min >= 0, rOverflow
|
||||
}
|
||||
}
|
||||
|
||||
func genIndex(s bitStream, n int, bias bool) int {
|
||||
assert(n > 0)
|
||||
|
||||
u, _, _ := genUintN(s, uint64(n-1), bias)
|
||||
|
||||
return int(u)
|
||||
}
|
||||
|
||||
func flipBiasedCoin(s bitStream, p float64) bool {
|
||||
assert(p >= 0 && p <= 1)
|
||||
|
||||
i := s.beginGroup(coinFlipLabel, false)
|
||||
f := genFloat01(s)
|
||||
s.endGroup(i, false)
|
||||
|
||||
return f >= 1-p
|
||||
}
|
||||
|
||||
type loadedDie struct {
|
||||
table []int
|
||||
}
|
||||
|
||||
func newLoadedDie(weights []int) *loadedDie {
|
||||
assert(len(weights) > 0)
|
||||
|
||||
if len(weights) == 1 {
|
||||
return &loadedDie{
|
||||
table: []int{0},
|
||||
}
|
||||
}
|
||||
|
||||
total := 0
|
||||
for _, w := range weights {
|
||||
assert(w > 0 && w < 100)
|
||||
total += w
|
||||
}
|
||||
|
||||
table := make([]int, total)
|
||||
i := 0
|
||||
for n, w := range weights {
|
||||
for j := i; i < j+w; i++ {
|
||||
table[i] = n
|
||||
}
|
||||
}
|
||||
|
||||
return &loadedDie{
|
||||
table: table,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *loadedDie) roll(s bitStream) int {
|
||||
i := s.beginGroup(dieRollLabel, false)
|
||||
ix := genIndex(s, len(d.table), false)
|
||||
s.endGroup(i, false)
|
||||
|
||||
return d.table[ix]
|
||||
}
|
||||
|
||||
type repeat struct {
|
||||
minCount int
|
||||
maxCount int
|
||||
avgCount float64
|
||||
pContinue float64
|
||||
count int
|
||||
group int
|
||||
rejected bool
|
||||
rejections int
|
||||
forceStop bool
|
||||
label string
|
||||
}
|
||||
|
||||
func newRepeat(minCount int, maxCount int, avgCount float64, label string) *repeat {
|
||||
if minCount < 0 {
|
||||
minCount = 0
|
||||
}
|
||||
if maxCount < 0 {
|
||||
maxCount = math.MaxInt
|
||||
}
|
||||
if avgCount < 0 {
|
||||
avgCount = float64(minCount) + math.Min(math.Max(float64(minCount), small), (float64(maxCount)-float64(minCount))/2)
|
||||
}
|
||||
|
||||
return &repeat{
|
||||
minCount: minCount,
|
||||
maxCount: maxCount,
|
||||
avgCount: avgCount,
|
||||
pContinue: 1 - 1/(1+avgCount-float64(minCount)), // TODO was no -minCount intentional?
|
||||
group: -1,
|
||||
label: label + repeatLabel,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *repeat) avg() int {
|
||||
return int(math.Ceil(r.avgCount))
|
||||
}
|
||||
|
||||
func (r *repeat) more(s bitStream) bool {
|
||||
if r.group >= 0 {
|
||||
s.endGroup(r.group, r.rejected)
|
||||
}
|
||||
|
||||
r.group = s.beginGroup(r.label, true)
|
||||
r.rejected = false
|
||||
|
||||
pCont := r.pContinue
|
||||
if r.count < r.minCount {
|
||||
pCont = 1
|
||||
} else if r.forceStop || r.count >= r.maxCount {
|
||||
pCont = 0
|
||||
}
|
||||
|
||||
cont := flipBiasedCoin(s, pCont)
|
||||
if cont {
|
||||
r.count++
|
||||
} else {
|
||||
s.endGroup(r.group, false)
|
||||
}
|
||||
|
||||
return cont
|
||||
}
|
||||
|
||||
func (r *repeat) reject() {
|
||||
assert(r.count > 0)
|
||||
r.count--
|
||||
r.rejected = true
|
||||
r.rejections++
|
||||
|
||||
if r.rejections > r.count*2 {
|
||||
if r.count >= r.minCount {
|
||||
r.forceStop = true
|
||||
} else {
|
||||
panic(invalidData("too many rejections in repeat"))
|
||||
}
|
||||
}
|
||||
}
|
||||
693
vendor/pgregory.net/rapid/vis.go
generated
vendored
Normal file
@@ -0,0 +1,693 @@
// Copyright 2019 Gregory Petrosyan <gregory.petrosyan@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

package rapid

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"html/template"
	"image"
	"image/color"
	"image/png"
	"io"
	"math"
	"time"
)

var (
	visBitSetColor   = color.Black
	visBitUnsetColor = color.White

	visTmpl = template.Must(template.New("rapid-vis").Parse(visHTML))
)

type visTmplData struct {
	Title     string
	Images    [][]*visTmplImage
	VisCSS    template.CSS
	RebootCSS template.CSS
}

type visGroupInfo struct {
	Classes string
	Label   string
}

type visTmplImage struct {
	Base64 string
	Title  string
	Alt    string
	Width  int
	Height int

	GroupBegins []visGroupInfo
	GroupEnds   []visGroupInfo
}

func visWriteHTML(w io.Writer, title string, recData []recordedBits) error {
	d := &visTmplData{
		Title:     fmt.Sprintf("%v (%v)", title, time.Now().Format(time.RFC1123)),
		VisCSS:    template.CSS(visCSS),
		RebootCSS: template.CSS(visRebootCSS),
	}

	labelClasses := map[string]string{}
	lastLabelClass := 0

	for _, rd := range recData {
		var images []*visTmplImage

		for _, u := range rd.data {
			tmplImg, err := visNewUint64Image(u).toTmplImage()
			if err != nil {
				return err
			}

			images = append(images, tmplImg)
		}

		for _, group := range rd.groups {
			if _, ok := labelClasses[group.label]; !ok {
				labelClasses[group.label] = fmt.Sprintf("label-%v", lastLabelClass)
				lastLabelClass++
			}
		}

		for _, group := range rd.groups {
			images[group.begin].GroupBegins = append(images[group.begin].GroupBegins, visGroupToInfo(labelClasses, group))
			if group.end > 0 {
				images[group.end-1].GroupEnds = append(images[group.end-1].GroupEnds, visGroupToInfo(labelClasses, group))
			}
		}

		d.Images = append(d.Images, images)
	}

	return visTmpl.Execute(w, d)
}
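// visGroupToInfo maps a recorded group to its CSS classes: "discard" for
// discarded groups, "endless" for groups that were never closed (end <= 0),
// plus the per-label class ("label-0", "label-1", ...) assigned above in
// visWriteHTML.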
func visGroupToInfo(labelClasses map[string]string, group groupInfo) visGroupInfo {
	discardClass := ""
	if group.discard {
		discardClass = "discard "
	}

	endlessClass := ""
	if group.end <= 0 {
		endlessClass = "endless "
	}

	return visGroupInfo{
		Classes: discardClass + endlessClass + labelClasses[group.label],
		Label:   group.label,
	}
}

type visUint64Image struct {
	u uint64
	p color.Palette
}

func visNewUint64Image(u uint64) *visUint64Image {
	s := newRandomBitStream(u, false)
	h1 := genFloat01(s)
	h2 := genFloat01(s)

	return &visUint64Image{
		u: u,
		p: color.Palette{visBitSetColor, visBitUnsetColor, visHsv(h1*360, 1, 1), visHsv(h2*360, 1, 1)},
	}
}
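// Palette layout: indices 0 and 1 are the set/unset bit colors, while 2 and 3
// are two hues derived from the value itself (by seeding a bit stream with u),
// used for the byte ruler drawn by ColorIndexAt.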

func (img *visUint64Image) ColorModel() color.Model {
	return img.p
}

func (img *visUint64Image) Bounds() image.Rectangle {
	return image.Rect(0, 0, 64, 2)
}

func (img *visUint64Image) ColorIndexAt(x, y int) uint8 {
	switch y {
	case 0:
		if (x/8)%2 == 0 {
			return 2
		}
		return 3
	default:
		if img.u&(1<<uint(63-x)) != 0 {
			return 0
		}
		return 1
	}
}
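// In the 64x2 image, row 0 is a byte ruler that alternates palette entries 2
// and 3 every 8 pixels, and row 1 shows the 64 bits of u, most significant bit
// leftmost.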

func (img *visUint64Image) At(x, y int) color.Color {
	return img.p[img.ColorIndexAt(x, y)]
}

func (img *visUint64Image) toTmplImage() (*visTmplImage, error) {
	enc := png.Encoder{
		CompressionLevel: png.BestCompression,
	}

	var buf bytes.Buffer
	err := enc.Encode(&buf, img)
	if err != nil {
		return nil, err
	}

	return &visTmplImage{
		Base64: base64.StdEncoding.EncodeToString(buf.Bytes()),
		Title:  fmt.Sprintf("0x%x / %d", img.u, img.u),
		Alt:    fmt.Sprintf("0x%x", img.u),
		Width:  64,
		Height: 2,
	}, nil
}

const visHTML = `<!doctype html>
<html lang="en">
<head>
	<meta charset="utf-8">
	<meta name="description" content="rapid debug data visualization">
	<meta name="viewport" content="width=device-width, initial-scale=1">
	<title>[rapid] {{.Title}}</title>
	<style>
	{{- .RebootCSS }}
	</style>
	<style>
	{{- .VisCSS }}
	</style>
</head>
<body>
	<h1>{{.Title}}</h1>
	{{range .Images -}}
	<div class="vis">
		{{range . -}}
		{{range .GroupBegins}}<span title="{{.Label}}" class="group {{.Classes}}"><span class="label">{{.Label}}</span>{{end}}
		<img title="{{.Title}}" alt="{{.Alt}}" width="{{.Width}}" height="{{.Height}}" src="data:image/png;base64,{{.Base64}}">
		{{range .GroupEnds}}</span>{{end}}
		{{end}}
	</div>
	{{end}}
</body>
</html>`

const visCSS = `
body {
	margin: 1rem;
}

.vis {
	display: flex;
	margin-bottom: 1rem;
}

.vis img {
	height: 1rem;
	margin: 0.3rem;

	image-rendering: pixelated;
	image-rendering: crisp-edges;
	image-rendering: -moz-crisp-edges;
	-ms-interpolation-mode: nearest-neighbor;
}

.vis .group {
	position: relative;
	display: inline-flex;
	padding: 0 0 1rem;
	background-color: rgba(0, 0, 0, 0.05);
	border-radius: 0 0 0.5rem 0.5rem;
	border-bottom: 2px solid black;
}

.vis .group.discard {
	background-color: rgba(255, 0, 0, 0.2);
}

.vis .group.endless {
	background-color: white;
}

.vis .group .label {
	font-size: 80%;
	white-space: nowrap;
	position: absolute;
	bottom: 0;
	left: 0.3rem;
	max-width: 100%;
	overflow: hidden;
}

.vis .group.discard > .label {
	color: red;
}

.vis .group.label-0 {
	border-bottom-color: red;
}
.vis .group.label-1 {
	border-bottom-color: maroon;
}
.vis .group.label-2 {
	border-bottom-color: yellow;
}
.vis .group.label-3 {
	border-bottom-color: olive;
}
.vis .group.label-4 {
	border-bottom-color: lime;
}
.vis .group.label-5 {
	border-bottom-color: green;
}
.vis .group.label-6 {
	border-bottom-color: aqua;
}
.vis .group.label-7 {
	border-bottom-color: teal;
}
.vis .group.label-8 {
	border-bottom-color: blue;
}
.vis .group.label-9 {
	border-bottom-color: navy;
}
.vis .group.label-10 {
	border-bottom-color: fuchsia;
}
.vis .group.label-11 {
	border-bottom-color: purple;
}
.vis .group.label-12 {
	border-bottom-color: red;
}
.vis .group.label-13 {
	border-bottom-color: maroon;
}
.vis .group.label-14 {
	border-bottom-color: yellow;
}
.vis .group.label-15 {
	border-bottom-color: olive;
}
.vis .group.label-16 {
	border-bottom-color: lime;
}
.vis .group.label-17 {
	border-bottom-color: green;
}
.vis .group.label-18 {
	border-bottom-color: aqua;
}
.vis .group.label-19 {
	border-bottom-color: teal;
}
.vis .group.label-20 {
	border-bottom-color: blue;
}
.vis .group.label-21 {
	border-bottom-color: navy;
}
.vis .group.label-22 {
	border-bottom-color: fuchsia;
}
.vis .group.label-23 {
	border-bottom-color: purple;
}
`

const visRebootCSS = `
/*!
 * Bootstrap Reboot v4.1.3 (https://getbootstrap.com/)
 * Copyright 2011-2018 The Bootstrap Authors
 * Copyright 2011-2018 Twitter, Inc.
 * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)
 */
*,
*::before,
*::after {
	box-sizing: border-box;
}

html {
	font-family: sans-serif;
	line-height: 1.15;
	-webkit-text-size-adjust: 100%;
	-ms-text-size-adjust: 100%;
	-ms-overflow-style: scrollbar;
	-webkit-tap-highlight-color: rgba(0, 0, 0, 0);
}

@-ms-viewport {
	width: device-width;
}

article, aside, figcaption, figure, footer, header, hgroup, main, nav, section {
	display: block;
}

body {
	margin: 0;
	font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
	font-size: 1rem;
	font-weight: 400;
	line-height: 1.5;
	color: #212529;
	text-align: left;
	background-color: #fff;
}

[tabindex="-1"]:focus {
	outline: 0 !important;
}

hr {
	box-sizing: content-box;
	height: 0;
	overflow: visible;
}

h1, h2, h3, h4, h5, h6 {
	margin-top: 0;
	margin-bottom: 0.5rem;
}

p {
	margin-top: 0;
	margin-bottom: 1rem;
}

abbr[title],
abbr[data-original-title] {
	text-decoration: underline;
	-webkit-text-decoration: underline dotted;
	text-decoration: underline dotted;
	cursor: help;
	border-bottom: 0;
}

address {
	margin-bottom: 1rem;
	font-style: normal;
	line-height: inherit;
}

ol,
ul,
dl {
	margin-top: 0;
	margin-bottom: 1rem;
}

ol ol,
ul ul,
ol ul,
ul ol {
	margin-bottom: 0;
}

dt {
	font-weight: 700;
}

dd {
	margin-bottom: .5rem;
	margin-left: 0;
}

blockquote {
	margin: 0 0 1rem;
}

dfn {
	font-style: italic;
}

b,
strong {
	font-weight: bolder;
}

small {
	font-size: 80%;
}

sub,
sup {
	position: relative;
	font-size: 75%;
	line-height: 0;
	vertical-align: baseline;
}

sub {
	bottom: -.25em;
}

sup {
	top: -.5em;
}

a {
	color: #007bff;
	text-decoration: none;
	background-color: transparent;
}

a:hover {
	color: #0056b3;
	text-decoration: underline;
}

a:not([href]):not([tabindex]) {
	color: inherit;
	text-decoration: none;
}

a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus {
	color: inherit;
	text-decoration: none;
}

a:not([href]):not([tabindex]):focus {
	outline: 0;
}

pre,
code,
kbd,
samp {
	font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
	font-size: 1em;
}

pre {
	margin-top: 0;
	margin-bottom: 1rem;
	overflow: auto;
	-ms-overflow-style: scrollbar;
}

figure {
	margin: 0 0 1rem;
}

img {
	vertical-align: middle;
	border-style: none;
}

svg {
	overflow: hidden;
	vertical-align: middle;
}

table {
	border-collapse: collapse;
}

caption {
	padding-top: 0.75rem;
	padding-bottom: 0.75rem;
	color: #6c757d;
	text-align: left;
	caption-side: bottom;
}

th {
	text-align: inherit;
}

label {
	display: inline-block;
	margin-bottom: 0.5rem;
}

button {
	border-radius: 0;
}

button:focus {
	outline: 1px dotted;
	outline: 5px auto -webkit-focus-ring-color;
}

input,
button,
select,
optgroup,
textarea {
	margin: 0;
	font-family: inherit;
	font-size: inherit;
	line-height: inherit;
}

button,
input {
	overflow: visible;
}

button,
select {
	text-transform: none;
}

button,
html [type="button"],
[type="reset"],
[type="submit"] {
	-webkit-appearance: button;
}

button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
	padding: 0;
	border-style: none;
}

input[type="radio"],
input[type="checkbox"] {
	box-sizing: border-box;
	padding: 0;
}

input[type="date"],
input[type="time"],
input[type="datetime-local"],
input[type="month"] {
	-webkit-appearance: listbox;
}

textarea {
	overflow: auto;
	resize: vertical;
}

fieldset {
	min-width: 0;
	padding: 0;
	margin: 0;
	border: 0;
}

legend {
	display: block;
	width: 100%;
	max-width: 100%;
	padding: 0;
	margin-bottom: .5rem;
	font-size: 1.5rem;
	line-height: inherit;
	color: inherit;
	white-space: normal;
}

progress {
	vertical-align: baseline;
}

[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
	height: auto;
}

[type="search"] {
	outline-offset: -2px;
	-webkit-appearance: none;
}

[type="search"]::-webkit-search-cancel-button,
[type="search"]::-webkit-search-decoration {
	-webkit-appearance: none;
}

::-webkit-file-upload-button {
	font: inherit;
	-webkit-appearance: button;
}

output {
	display: inline-block;
}

summary {
	display: list-item;
	cursor: pointer;
}

template {
	display: none;
}

[hidden] {
	display: none !important;
}
`

// Copyright 2013 Lucas Beyer
// Licensed under MIT (https://github.com/lucasb-eyer/go-colorful/blob/master/LICENSE)
//
// Hsv creates a new Color given a Hue in [0..360], a Saturation and a Value in [0..1]
func visHsv(H, S, V float64) color.RGBA {
	Hp := H / 60.0
	C := V * S
	X := C * (1.0 - math.Abs(math.Mod(Hp, 2.0)-1.0))

	m := V - C
	r, g, b := 0.0, 0.0, 0.0

	switch {
	case 0.0 <= Hp && Hp < 1.0:
		r = C
		g = X
	case 1.0 <= Hp && Hp < 2.0:
		r = X
		g = C
	case 2.0 <= Hp && Hp < 3.0:
		g = C
		b = X
	case 3.0 <= Hp && Hp < 4.0:
		g = X
		b = C
	case 4.0 <= Hp && Hp < 5.0:
		r = X
		b = C
	case 5.0 <= Hp && Hp < 6.0:
		r = C
		b = X
	}

	return color.RGBA{R: uint8((m + r) * 255), G: uint8((m + g) * 255), B: uint8((m + b) * 255), A: 255}
}
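// For example, visHsv(120, 1, 1) is pure green: Hp = 2, C = 1, X = 0, m = 0,
// so only g = C is set and the result is color.RGBA{R: 0, G: 255, B: 0, A: 255}.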