Example #1
func TestPseudoRand(t *testing.T) {
	numbers := make(map[int]bool)
	// Make two random number generators and pull two numbers from each.
	rand1, _ := randutil.NewPseudoRand()
	rand2, _ := randutil.NewPseudoRand()
	numbers[rand1.Int()] = true
	numbers[rand1.Int()] = true
	numbers[rand2.Int()] = true
	numbers[rand2.Int()] = true
	// All four numbers should be distinct; no seed state is shared.
	if len(numbers) != 4 {
		t.Errorf("expected 4 unique numbers; got %d", len(numbers))
	}
}
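The examples in this collection never show randutil.NewPseudoRand itself. Judging only from how it is used above (the first return value behaves like a *math/rand.Rand and the second is an integer seed that tests log for reproducibility), a minimal sketch could look like the following; the crypto/rand-based seeding is an assumption, not something these examples establish.

package randutil

import (
	crand "crypto/rand"
	"encoding/binary"
	"math/rand"
)

// NewPseudoSeed draws a seed from a cryptographically secure source so that
// independently created generators do not share state (assumed detail).
func NewPseudoSeed() int64 {
	var buf [8]byte
	if _, err := crand.Read(buf[:]); err != nil {
		panic(err) // assumption: a failed read is not worth recovering from in tests
	}
	return int64(binary.LittleEndian.Uint64(buf[:]))
}

// NewPseudoRand returns a seeded pseudo-random generator together with the
// seed, so a failing test can log the seed and the run can be replayed.
func NewPseudoRand() (*rand.Rand, int64) {
	seed := NewPseudoSeed()
	return rand.New(rand.NewSource(seed)), seed
}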
Example #2
// TestTransactionObservedTimestamp verifies that txn.{Get,Update}ObservedTimestamp work as
// advertised.
func TestTransactionObservedTimestamp(t *testing.T) {
	var txn Transaction
	rng, seed := randutil.NewPseudoRand()
	t.Logf("running with seed %d", seed)
	ids := append([]int{109, 104, 102, 108, 1000}, rand.Perm(100)...)
	timestamps := make(map[NodeID]hlc.Timestamp, len(ids))
	for i := 0; i < len(ids); i++ {
		timestamps[NodeID(i)] = hlc.ZeroTimestamp.Add(rng.Int63(), 0)
	}
	for i, n := range ids {
		nodeID := NodeID(n)
		if ts, ok := txn.GetObservedTimestamp(nodeID); ok {
			t.Fatalf("%d: false positive hit %s in %v", nodeID, ts, ids[:i+1])
		}
		txn.UpdateObservedTimestamp(nodeID, timestamps[nodeID])
		txn.UpdateObservedTimestamp(nodeID, hlc.MaxTimestamp) // should be noop
		if exp, act := i+1, len(txn.ObservedTimestamps); act != exp {
			t.Fatalf("%d: expected %d entries, got %d: %v", nodeID, exp, act, txn.ObservedTimestamps)
		}
	}
	for _, m := range ids {
		checkID := NodeID(m)
		exp := timestamps[checkID]
		if act, _ := txn.GetObservedTimestamp(checkID); !act.Equal(exp) {
			t.Fatalf("%d: expected %s, got %s", checkID, exp, act)
		}
	}

	var emptyTxn Transaction
	ts := hlc.ZeroTimestamp.Add(1, 2)
	emptyTxn.UpdateObservedTimestamp(NodeID(1), ts)
	if actTS, _ := emptyTxn.GetObservedTimestamp(NodeID(1)); !actTS.Equal(ts) {
		t.Fatalf("unexpected: %s (wanted %s)", actTS, ts)
	}
}
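The "should be noop" update above only makes sense if UpdateObservedTimestamp keeps the earliest timestamp recorded for a node and ignores later, higher ones. A minimal sketch of that rule, using a plain map rather than the actual ObservedTimestamps representation:

// updateObserved records a node's observed timestamp, keeping the earliest
// value seen; a later call with a higher timestamp (such as hlc.MaxTimestamp
// in the test above) leaves the entry untouched. Illustrative only.
func updateObserved(observed map[NodeID]hlc.Timestamp, nodeID NodeID, ts hlc.Timestamp) {
	if existing, ok := observed[nodeID]; !ok || ts.Less(existing) {
		observed[nodeID] = ts
	}
}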
Example #3
func TestApproximateSize(t *testing.T) {
	defer leaktest.AfterTest(t)
	runWithAllEngines(func(engine Engine, t *testing.T) {
		var (
			count    = 10000
			keys     = make([]proto.EncodedKey, count)
			values   = make([][]byte, count) // Random values to prevent compression
			rand, _  = randutil.NewPseudoRand()
			valueLen = 10
		)
		for i := 0; i < count; i++ {
			keys[i] = []byte(fmt.Sprintf("key%8d", i))
			values[i] = randutil.RandBytes(rand, valueLen)
		}

		insertKeysAndValues(keys, values, engine, t)

		if err := engine.Flush(); err != nil {
			t.Fatalf("Error flushing InMem: %s", err)
		}

		sizePerRecord := (len([]byte(keys[0])) + valueLen)
		verifyApproximateSize(keys, engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/2], engine, sizePerRecord, 0.15, t)
		verifyApproximateSize(keys[:count/4], engine, sizePerRecord, 0.15, t)
	}, t)
}
Example #4
func runMVCCConditionalPut(valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(rocksdb, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(rocksdb, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
Example #5
func TestBatchBuilderStress(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := NewInMem(roachpb.Attributes{}, 1<<20, stopper)

	rng, _ := randutil.NewPseudoRand()

	for i := 0; i < 1000; i++ {
		count := 1 + rng.Intn(1000)

		func() {
			batch := e.NewBatch().(*rocksDBBatch)
			defer batch.Close()

			builder := &rocksDBBatchBuilder{}

			for j := 0; j < count; j++ {
				var ts hlc.Timestamp
				if rng.Float32() <= 0.9 {
					// Give 90% of keys timestamps.
					ts.WallTime = rng.Int63()
					if rng.Float32() <= 0.1 {
						// Give 10% of timestamps a non-zero logical component.
						ts.Logical = rng.Int31()
					}
				}
				key := MVCCKey{
					Key:       []byte(fmt.Sprintf("%d", rng.Intn(10000))),
					Timestamp: ts,
				}
				// Generate a random mixture of puts, deletes and merges.
				switch rng.Intn(3) {
				case 0:
					if err := dbPut(batch.batch, key, []byte("value")); err != nil {
						t.Fatal(err)
					}
					builder.Put(key, []byte("value"))
				case 1:
					if err := dbClear(batch.batch, key); err != nil {
						t.Fatal(err)
					}
					builder.Clear(key)
				case 2:
					if err := dbMerge(batch.batch, key, appender("bar")); err != nil {
						t.Fatal(err)
					}
					builder.Merge(key, appender("bar"))
				}
			}

			batchRepr := batch.Repr()
			builderRepr := builder.Finish()
			if !bytes.Equal(batchRepr, builderRepr) {
				t.Fatalf("expected [% x], but got [% x]", batchRepr, builderRepr)
			}
		}()
	}
}
Example #6
// createCluster generates a new cluster using the provided stopper and the
// number of nodes supplied. Each node will have one store to start.
func createCluster(stopper *stop.Stopper, nodeCount int) *Cluster {
	rand, seed := randutil.NewPseudoRand()
	clock := hlc.NewClock(hlc.UnixNano)
	rpcContext := rpc.NewContext(&base.Context{}, clock, stopper)
	g := gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	storePool := storage.NewStorePool(g, storage.TestTimeUntilStoreDeadOff, stopper)
	c := &Cluster{
		stopper:       stopper,
		clock:         clock,
		rpc:           rpcContext,
		gossip:        g,
		storePool:     storePool,
		allocator:     storage.MakeAllocator(storePool, storage.RebalancingOptions{}),
		storeGossiper: gossiputil.NewStoreGossiper(g),
		nodes:         make(map[proto.NodeID]*Node),
		stores:        make(map[proto.StoreID]*Store),
		ranges:        make(map[proto.RangeID]*Range),
		rand:          rand,
		seed:          seed,
	}

	// Add the nodes.
	for i := 0; i < nodeCount; i++ {
		c.addNewNodeWithStore()
	}

	// Add a single range and attach it to the first node's first store.
	firstRange := c.addRange()
	firstRange.attachRangeToStore(c.stores[proto.StoreID(0)])
	return c
}
Example #7
func TestNonsortingEncodeDecimalRand(t *testing.T) {
	rng, _ := randutil.NewPseudoRand()
	const randomTrials = 200000
	for i := 0; i < randomTrials; i++ {
		var tmp, appendTo []byte
		// Test with and without appending.
		if rng.Intn(2) == 1 {
			appendTo = randBuf(rng, 30)
			appendTo = appendTo[:rng.Intn(len(appendTo)+1)]
		}
		// Test with and without tmp buffer.
		if rng.Intn(2) == 1 {
			tmp = randBuf(rng, 100)
		}
		cur := randDecimal(rng, -20, 20)

		enc := EncodeNonsortingDecimal(appendTo, cur)
		enc = enc[len(appendTo):]
		res, err := DecodeNonsortingDecimal(enc, tmp)
		if err != nil {
			t.Fatal(err)
		}

		// Make sure we decode the same value we encoded.
		if cur.Cmp(res) != 0 {
			t.Fatalf("unexpected mismatch for %v, got %v", cur, res)
		}

		// Make sure we would have overestimated the value.
		if est := UpperBoundNonsortingDecimalSize(cur); est < len(enc) {
			t.Fatalf("expected estimate of %d for %v to be greater than or equal to the encoded length, found [% x]", est, cur, enc)
		}
	}
}
Example #8
func TestEncodeDecimalRand(t *testing.T) {
	rng, _ := randutil.NewPseudoRand()
	// Test both directions.
	for _, dir := range []Direction{Ascending, Descending} {
		var prev *inf.Dec
		var prevEnc []byte
		const randomTrials = 100000
		for i := 0; i < randomTrials; i++ {
			cur := randDecimal(rng, -20, 20)
			var tmp, appendTo []byte
			// Test with and without appending.
			if rng.Intn(2) == 1 {
				appendTo = randBuf(rng, 30)
				appendTo = appendTo[:rng.Intn(len(appendTo)+1)]
			}
			// Test with and without tmp buffer.
			if rng.Intn(2) == 1 {
				tmp = randBuf(rng, 100)
			}
			var enc []byte
			var res *inf.Dec
			var err error
			if dir == Ascending {
				enc = EncodeDecimalAscending(appendTo, cur)
				enc = enc[len(appendTo):]
				_, res, err = DecodeDecimalAscending(enc, tmp)
			} else {
				enc = EncodeDecimalDescending(appendTo, cur)
				enc = enc[len(appendTo):]
				_, res, err = DecodeDecimalDescending(enc, tmp)
			}
			if err != nil {
				t.Fatal(err)
			}

			testPeekLength(t, enc)

			// Make sure we decode the same value we encoded.
			if cur.Cmp(res) != 0 {
				t.Fatalf("unexpected mismatch for %v, got %v", cur, res)
			}

			// Make sure lexicographical sorting is consistent.
			if prev != nil {
				bytesCmp := bytes.Compare(prevEnc, enc)
				cmpType := "same"
				if dir == Descending {
					bytesCmp *= -1
					cmpType = "inverse"
				}
				if decCmp := prev.Cmp(cur); decCmp != bytesCmp {
					t.Fatalf("expected [% x] to compare to [% x] the %s way that %v compares to %v",
						prevEnc, enc, cmpType, prev, cur)
				}
			}
			prev = cur
			prevEnc = enc
		}
	}
}
Example #9
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng, stopper := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer stopper.Stop()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
Example #10
func BenchmarkEncodeNonsortingVarint(b *testing.B) {
	bytes := make([]byte, b.N*binary.MaxVarintLen64)
	rng, _ := randutil.NewPseudoRand()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bytes = EncodeNonsortingVarint(bytes, rng.Int63())
	}
}
Example #11
func makeEncodedVals(minExp, maxExp int) [][]byte {
	rng, _ := randutil.NewPseudoRand()
	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeDecimalAscending(nil, randDecimal(rng, minExp, maxExp))
	}
	return vals
}
Example #12
// makeDecimalVals creates decimal values with exponents in
// the range [minExp, maxExp].
func makeDecimalVals(minExp, maxExp int) []*inf.Dec {
	rng, _ := randutil.NewPseudoRand()
	vals := make([]*inf.Dec, 10000)
	for i := range vals {
		vals[i] = randDecimal(rng, minExp, maxExp)
	}
	return vals
}
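Examples #7, #8, #11, and #12 all rely on a randDecimal helper that is not shown here. A plausible sketch, assuming it draws a random coefficient and an exponent uniformly from [minExp, maxExp] as the comment above describes (the behavior is reconstructed, not taken from the source):

// randDecimal returns a random arbitrary-precision decimal whose exponent
// lies in [minExp, maxExp]. Hypothetical reconstruction of the test helper.
func randDecimal(rng *rand.Rand, minExp, maxExp int) *inf.Dec {
	exp := randutil.RandIntInRange(rng, minExp, maxExp+1)
	// inf.Dec stores coefficient * 10^(-scale), so the exponent is negated.
	return inf.NewDec(rng.Int63(), inf.Scale(-exp))
}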
Example #13
func TestRandBytes(t *testing.T) {
	rand, _ := randutil.NewPseudoRand()
	for i := 0; i < 100; i++ {
		x := randutil.RandBytes(rand, i)
		if len(x) != i {
			t.Errorf("got array with unexpected length: %d (expected %d)", len(x), i)
		}
	}
}
Example #14
func TestRandIntInRange(t *testing.T) {
	rand, _ := randutil.NewPseudoRand()
	for i := 0; i < 100; i++ {
		x := randutil.RandIntInRange(rand, 20, 40)
		if x < 20 || x >= 40 {
			t.Errorf("got result out of range: %d", x)
		}
	}
}
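The two tests above pin down only the observable behavior: RandBytes returns exactly the requested number of bytes, and RandIntInRange stays within the half-open interval [min, max). Sketches consistent with those properties (not necessarily the real implementations) might be:

// RandBytes returns size random bytes drawn from rng (assumed implementation).
func RandBytes(rng *rand.Rand, size int) []byte {
	b := make([]byte, size)
	for i := range b {
		b[i] = byte(rng.Intn(256))
	}
	return b
}

// RandIntInRange returns a value in [min, max), matching what
// TestRandIntInRange checks for the range [20, 40).
func RandIntInRange(rng *rand.Rand, min, max int) int {
	return min + rng.Intn(max-min)
}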
Example #15
func makeIntTestDatum(count int) []parser.Datum {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]parser.Datum, count)
	for i := range vals {
		vals[i] = parser.NewDInt(parser.DInt(rng.Int63()))
	}
	return vals
}
Example #16
func makeFloatTestDatum(count int) []parser.Datum {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]parser.Datum, count)
	for i := range vals {
		vals[i] = parser.DFloat(rng.Float64())
	}
	return vals
}
Example #17
func makeDecimalTestDatum(count int) []parser.Datum {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]parser.Datum, count)
	for i := range vals {
		dd := &parser.DDecimal{}
		decimal.SetFromFloat(&dd.Dec, rng.Float64())
		vals[i] = dd
	}
	return vals
}
Example #18
// setupMVCCScanData writes up to numVersions values at each of numKeys
// keys. The number of versions written for each key is chosen
// randomly according to a uniform distribution. Each successive
// version is written starting at 5ns and then in 5ns increments. This
// allows scans at various times, starting at t=5ns, and continuing to
// t=5ns*(numVersions+1). A version for each key will be read on every
// such scan, but the dynamics of the scan will change depending on
// the historical timestamp. Earlier timestamps mean scans which must
// skip more historical versions; later timestamps mean scans which
// skip fewer.
//
// The creation of the rocksdb database is time consuming, especially
// for larger numbers of versions. The database is persisted between
// runs and stored in the current directory as
// "mvcc_scan_<versions>_<keys>_<valueBytes>".
func setupMVCCScanData(numVersions, numKeys, valueBytes int, b *testing.B) (*RocksDB, *stop.Stopper) {
	loc := fmt.Sprintf("mvcc_scan_%d_%d_%d", numVersions, numKeys, valueBytes)

	exists := true
	if _, err := os.Stat(loc); os.IsNotExist(err) {
		exists = false
	}

	const cacheSize = 8 << 30 // 8 GB
	stopper := stop.NewStopper()
	rocksdb := NewRocksDB(roachpb.Attributes{}, loc, cacheSize, stopper)
	if err := rocksdb.Open(); err != nil {
		b.Fatalf("could not create new rocksdb db instance at %s: %v", loc, err)
	}

	if exists {
		return rocksdb, stopper
	}

	log.Infof("creating mvcc data: %s", loc)

	rng, _ := randutil.NewPseudoRand()
	keys := make([]roachpb.Key, numKeys)
	nvs := make([]int, numKeys)
	for t := 1; t <= numVersions; t++ {
		walltime := int64(5 * t)
		ts := makeTS(walltime, 0)
		batch := rocksdb.NewBatch()
		for i := 0; i < numKeys; i++ {
			if t == 1 {
				keys[i] = roachpb.Key(encoding.EncodeUvarint([]byte("key-"), uint64(i)))
				nvs[i] = rand.Intn(numVersions) + 1
			}
			// Only write values if this iteration is less than the random
			// number of versions chosen for this key.
			if t <= nvs[i] {
				value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueBytes))
				value.InitChecksum(keys[i])
				if err := MVCCPut(batch, nil, keys[i], ts, value, nil); err != nil {
					b.Fatal(err)
				}
			}
		}
		if err := batch.Commit(); err != nil {
			b.Fatal(err)
		}
		batch.Close()
	}
	rocksdb.CompactRange(nil, nil)

	return rocksdb, stopper
}
Example #19
// TestPut starts up an N node cluster and runs N workers that write
// to independent keys.
func TestPut(t *testing.T) {
	l := localcluster.Create(*numNodes, stopper)
	l.Start()
	defer l.Stop()

	db, dbStopper := makeDBClient(t, l, 0)
	defer dbStopper.Stop()
	if err := configutil.SetDefaultRangeMaxBytes(db, *rangeMaxBytes); err != nil {
		t.Fatal(err)
	}
	checkRangeReplication(t, l, 20*time.Second)

	errs := make(chan error, *numNodes)
	start := time.Now()
	deadline := start.Add(*duration)
	var count int64
	for i := 0; i < *numNodes; i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for time.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < *numNodes; {
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			log.Infof("%d", atomic.LoadInt64(&count))
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #20
func BenchmarkDecodeUvarint(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeUvarintAscending(nil, uint64(rng.Int63()))
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _ = DecodeUvarintAscending(vals[i%len(vals)])
	}
}
Example #21
func BenchmarkDecodeNumericFloat(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeNumericFloat(nil, rng.Float64())
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = DecodeNumericFloat(vals[i%len(vals)])
	}
}
Example #22
func BenchmarkPeekLengthBytesDescending(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeBytesDescending(nil, randutil.RandBytes(rng, 100))
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = PeekLength(vals[i%len(vals)])
	}
}
Example #23
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}

	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)

	waitClientsStop(1, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, client)

	elapsed := time.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
	kvClient, kvStopper := c.NewClient(t, num-1)
	defer kvStopper.Stop()
	if pErr := kvClient.CheckConsistency(keys.TableDataMin, keys.TableDataMax); pErr != nil {
		// TODO(.*): change back to t.Fatal after #5051.
		log.Error(pErr)
	}
}
Example #24
// TestPut starts up an N node cluster and runs N workers that write
// to independent keys.
func TestPut(t *testing.T) {
	c := StartCluster(t)
	defer c.AssertAndStop(t)

	db, dbStopper := makeClient(t, c.ConnString(0))
	defer dbStopper.Stop()

	errs := make(chan error, c.NumNodes())
	start := time.Now()
	deadline := start.Add(*flagDuration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for time.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if pErr := db.Put(fmt.Sprintf("%08d", k), v); pErr != nil {
					errs <- pErr.GoError()
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			count := atomic.LoadInt64(&count)
			log.Infof("%d (%d/s)", count, count-baseCount)
			c.Assert(t)
		}
	}

	elapsed := time.Since(start)
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #25
func testClusterRecoveryInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if num <= 0 {
		t.Fatalf("%d nodes in cluster", num)
	}

	// One client for each node.
	initBank(t, c.PGUrl(0))

	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, num),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, num),
	}

	for i := 0; i < num; i++ {
		state.clients[i].Lock()
		state.initClient(t, c, i)
		state.clients[i].Unlock()
		go transferMoneyLoop(i, &state, *numAccounts, *maxTransfer)
	}

	defer func() {
		<-state.teardown
	}()

	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf("monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return rnd.Perm(num)[:rnd.Intn(num)+1]
	}
	go chaosMonkey(&state, c, true, pickNodes)

	waitClientsStop(num, &state, cfg.Stall)

	// Verify accounts.
	verifyAccounts(t, &state.clients[0])

	elapsed := time.Since(start)
	var count uint64
	counts := state.counts()
	for _, c := range counts {
		count += c
	}
	log.Infof("%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #26
func BenchmarkDecodeDuration(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		d := duration.Duration{Months: rng.Int63(), Days: rng.Int63(), Nanos: rng.Int63()}
		vals[i], _ = EncodeDurationAscending(nil, d)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _ = DecodeDurationAscending(vals[i%len(vals)])
	}
}
Example #27
func BenchmarkDecimalSqrt(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]*inf.Dec, 10000)
	for i := range vals {
		vals[i] = NewDecFromFloat(math.Abs(rng.Float64()))
	}

	z := new(inf.Dec)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Sqrt(z, vals[i%len(vals)], 16)
	}
}
Example #28
func BenchmarkDecodeStringDescending(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([][]byte, 10000)
	for i := range vals {
		vals[i] = EncodeStringDescending(nil, string(randutil.RandBytes(rng, 100)))
	}

	buf := make([]byte, 0, 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _, _ = DecodeStringDescending(vals[i%len(vals)], buf)
	}
}
Example #29
func BenchmarkEncodeString(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]string, 10000)
	for i := range vals {
		vals[i] = string(randutil.RandBytes(rng, 100))
	}

	buf := make([]byte, 0, 1000)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeStringAscending(buf, vals[i%len(vals)])
	}
}
Example #30
func BenchmarkEncodeUvarint(b *testing.B) {
	rng, _ := randutil.NewPseudoRand()

	vals := make([]uint64, 10000)
	for i := range vals {
		vals[i] = uint64(rng.Int63())
	}

	buf := make([]byte, 0, 16)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = EncodeUvarintAscending(buf, vals[i%len(vals)])
	}
}