Example #1
func runMVCCConditionalPut(valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	stopper := stop.NewStopper()
	defer stopper.Stop()
	rocksdb := NewInMem(roachpb.Attributes{}, testCacheSize, stopper)

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(rocksdb, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(rocksdb, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
Example #2
func (z *zeroSum) monitor(d time.Duration) {
	start := timeutil.Now()
	lastTime := start
	var lastOps uint64

	for ticks := 0; true; ticks++ {
		time.Sleep(d)

		if ticks%20 == 0 {
			fmt.Printf("_elapsed__accounts_________ops__ops/sec___errors___splits____xfers___ranges_____________replicas\n")
		}

		now := timeutil.Now()
		elapsed := now.Sub(lastTime).Seconds()
		ops := atomic.LoadUint64(&z.stats.ops)

		z.ranges.Lock()
		ranges, replicas := z.ranges.count, z.ranges.replicas
		z.ranges.Unlock()

		fmt.Printf("%8s %9d %11d %8.1f %8d %8d %8d %8d %20s\n",
			time.Duration(now.Sub(start).Seconds()+0.5)*time.Second,
			z.accountsLen(), ops, float64(ops-lastOps)/elapsed,
			atomic.LoadUint64(&z.stats.errors),
			atomic.LoadUint64(&z.stats.splits),
			atomic.LoadUint64(&z.stats.transfers),
			ranges, z.formatReplicas(replicas))
		lastTime = now
		lastOps = ops
	}
}
Example #3
func testGossipPeeringsInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()

	deadline := timeutil.Now().Add(cfg.Duration)

	waitTime := longWaitTime
	if cfg.Duration < waitTime {
		waitTime = shortWaitTime
	}

	for timeutil.Now().Before(deadline) {
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart the first node.
		log.Infof(context.Background(), "restarting node 0")
		if err := c.Restart(0); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))

		// Restart another node (if there is one).
		var pickedNode int
		if num > 1 {
			pickedNode = rand.Intn(num-1) + 1
		}
		log.Infof(context.Background(), "restarting node %d", pickedNode)
		if err := c.Restart(pickedNode); err != nil {
			t.Fatal(err)
		}
		checkGossip(t, c, waitTime, hasPeers(num))
	}
}
Example #4
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
	rng, _ := randutil.NewPseudoRand()
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
	keyBuf := append(make([]byte, 0, 64), []byte("key-")...)

	eng, stopper := emk(b, fmt.Sprintf("cput_%d", valueSize))
	defer stopper.Stop()

	b.SetBytes(int64(valueSize))
	var expected *roachpb.Value
	if createFirst {
		for i := 0; i < b.N; i++ {
			key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
			ts := makeTS(timeutil.Now().UnixNano(), 0)
			if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
				b.Fatalf("failed put: %s", err)
			}
		}
		expected = &value
	}

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
		ts := makeTS(timeutil.Now().UnixNano(), 0)
		if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
			b.Fatalf("failed put: %s", err)
		}
	}

	b.StopTimer()
}
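Benchmarks drive this helper by passing an engine maker and a value size. A minimal sketch of such wrappers, assuming setupMVCCInMemRocksDB (see example 15) satisfies the engineMaker signature; the benchmark names and the 1024-byte value size are illustrative, not taken from the original suite:

func BenchmarkMVCCConditionalPutCreate(b *testing.B) {
	// Condition against missing keys: expected stays nil.
	runMVCCConditionalPut(setupMVCCInMemRocksDB, 1024, false, b)
}

func BenchmarkMVCCConditionalPutReplace(b *testing.B) {
	// Pre-populate the keys so each conditional put must match the
	// previously written value.
	runMVCCConditionalPut(setupMVCCInMemRocksDB, 1024, true, b)
}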
Example #5
// AfterTest snapshots the currently-running goroutines and returns a
// function to be run at the end of tests to see whether any
// goroutines leaked.
func AfterTest(t testing.TB) func() {
	orig := map[string]bool{}
	for _, g := range interestingGoroutines() {
		orig[g] = true
	}
	return func() {
		// Loop, waiting for goroutines to shut down.
		// Wait up to 5 seconds, but finish as quickly as possible.
		deadline := timeutil.Now().Add(5 * time.Second)
		for {
			var leaked []string
			for _, g := range interestingGoroutines() {
				if !orig[g] {
					leaked = append(leaked, g)
				}
			}
			if len(leaked) == 0 {
				return
			}
			if timeutil.Now().Before(deadline) {
				time.Sleep(50 * time.Millisecond)
				continue
			}
			for _, g := range leaked {
				t.Errorf("Leaked goroutine: %v", g)
			}
			return
		}
	}
}
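Callers pair the snapshot with the returned checker via defer, as example 7 below does through the leaktest package. A minimal sketch:

func TestSomething(t *testing.T) {
	// Snapshot the goroutines now; run the leak check when the test returns.
	defer AfterTest(t)()

	// ... test body that starts and stops goroutines ...
}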
Example #6
func TestPeekType(t *testing.T) {
	encodedDurationAscending, _ := EncodeDurationAscending(nil, duration.Duration{})
	encodedDurationDescending, _ := EncodeDurationDescending(nil, duration.Duration{})
	testCases := []struct {
		enc []byte
		typ Type
	}{
		{EncodeNullAscending(nil), Null},
		{EncodeNotNullAscending(nil), NotNull},
		{EncodeNullDescending(nil), Null},
		{EncodeNotNullDescending(nil), NotNull},
		{EncodeVarintAscending(nil, 0), Int},
		{EncodeVarintDescending(nil, 0), Int},
		{EncodeUvarintAscending(nil, 0), Int},
		{EncodeUvarintDescending(nil, 0), Int},
		{EncodeFloatAscending(nil, 0), Float},
		{EncodeFloatDescending(nil, 0), Float},
		{EncodeDecimalAscending(nil, inf.NewDec(0, 0)), Decimal},
		{EncodeDecimalDescending(nil, inf.NewDec(0, 0)), Decimal},
		{EncodeBytesAscending(nil, []byte("")), Bytes},
		{EncodeBytesDescending(nil, []byte("")), BytesDesc},
		{EncodeTimeAscending(nil, timeutil.Now()), Time},
		{EncodeTimeDescending(nil, timeutil.Now()), TimeDesc},
		{encodedDurationAscending, Duration},
		{encodedDurationDescending, Duration},
	}
	for i, c := range testCases {
		typ := PeekType(c.enc)
		if c.typ != typ {
			t.Fatalf("%d: expected %d, but found %d", i, c.typ, typ)
		}
	}
}
Example #7
func TestAdminAPIUIData(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := StartTestServer(t)
	defer s.Stop()

	start := timeutil.Now()

	mustSetUIData := func(key string, val []byte) {
		var resp struct{}
		b64Val := base64.StdEncoding.EncodeToString(val)
		reqBody := fmt.Sprintf(`{"key": "%s", "value": "%s"}`, key, b64Val)
		if err := apiPost(s, "uidata", reqBody, &resp); err != nil {
			t.Fatal(err)
		}
	}

	expectValueEquals := func(key string, expVal []byte) {
		var resp GetUIDataResponse
		url := fmt.Sprintf("uidata?key=%s", key)
		if err := apiGet(s, url, &resp); err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(resp.Value, expVal) {
			t.Fatalf("value for key %s = %v != expected %v", key, resp.Value, expVal)
		}

		// Sanity check LastUpdated.
		now := timeutil.Now()
		lastUpdated := time.Unix(resp.LastUpdated.Sec, int64(resp.LastUpdated.Nsec))
		if lastUpdated.Before(start) {
			t.Fatalf("lastUpdated %s < start %s", lastUpdated, start)
		}
		if lastUpdated.After(now) {
			t.Fatalf("lastUpdated %s > now %s", lastUpdated, now)
		}
	}

	expectKeyNotFound := func(key string) {
		var resp GetUIDataResponse
		url := fmt.Sprintf("uidata?key=%s", key)
		expErr := "key " + key + " not found"
		if err := apiGet(s, url, &resp); !testutils.IsError(err, expErr) {
			t.Fatalf("unexpected error: %s", err)
		}
	}

	// Basic tests.
	mustSetUIData("k1", []byte("v1"))
	expectValueEquals("k1", []byte("v1"))
	expectKeyNotFound("NON_EXISTENT_KEY")

	// Write a binary blob with all possible byte values, then verify it.
	var buf bytes.Buffer
	for i := 0; i < 997; i++ {
		buf.WriteByte(byte(i % 256))
	}
	mustSetUIData("bin", buf.Bytes())
	expectValueEquals("bin", buf.Bytes())
}
Example #8
// Execute executes the statement(s) in the given request and returns a response.
// On error, the returned integer is an HTTP error code.
func (e *Executor) Execute(args Request) (Response, int, error) {
	defer func(start time.Time) {
		e.latency.RecordValue(timeutil.Now().Sub(start).Nanoseconds())
	}(timeutil.Now())
	results := e.ExecuteStatements(
		args.User, args.Session, args.SQL, args.Params)
	return Response{Results: results, Session: args.Session}, 0, nil
}
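The deferred closure receives its start argument when the defer statement executes, at function entry, so the recorded latency covers the whole call. The same idiom works with just the standard library (fmt, time); a minimal sketch:

func timedWork() {
	// time.Now() is evaluated at the defer statement, i.e. on entry;
	// the closure body runs on return, measuring the full duration.
	defer func(start time.Time) {
		fmt.Printf("took %s\n", time.Since(start))
	}(time.Now())

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}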
Example #9
// insertLoad adds a very basic load that inserts into a unique table and checks
// that the inserted values are indeed correct.
func insertLoad(t *testing.T, dc *dynamicClient, ID int) {
	// Initialize the db.
	if _, err := dc.exec(`CREATE DATABASE IF NOT EXISTS Insert`); err != nil {
		t.Fatal(err)
	}

	tableName := fmt.Sprintf("Insert.Table%d", ID)
	createTableStatement := fmt.Sprintf(`
CREATE TABLE %s (
	key INT PRIMARY KEY,
	value INT NOT NULL
)`, tableName)
	insertStatement := fmt.Sprintf(`INSERT INTO %s (key, value) VALUES ($1, $1)`, tableName)
	selectStatement := fmt.Sprintf(`SELECT key-value AS "total" FROM %s WHERE key = $1`, tableName)

	// Init the db for the basic insert.
	if _, err := dc.exec(createTableStatement); err != nil {
		t.Fatal(err)
	}

	var valueCheck, valueInsert int
	nextUpdate := timeutil.Now()

	// Perform inserts and selects
	for dc.isRunning() {

		// Insert some values.
		valueInsert++
		if _, err := dc.exec(insertStatement, valueInsert); err != nil {
			if err == errTestFinished {
				return
			}
			t.Fatal(err)
		}

		// Check that another value is still correct.
		valueCheck--
		if valueCheck < 1 {
			valueCheck = valueInsert
		}

		var total int
		if err := dc.queryRowScan(selectStatement, []interface{}{valueCheck}, []interface{}{&total}); err != nil {
			if err == errTestFinished {
				return
			}
			t.Fatal(err)
		}
		if total != 0 {
			t.Fatalf("total expected to be 0, is %d", total)
		}

		if timeutil.Now().After(nextUpdate) {
			log.Infof("Insert %d: inserted and checked %d values", ID, valueInsert)
			nextUpdate = timeutil.Now().Add(time.Second)
		}
	}
}
Example #10
// Logs returns the log entries parsed from the log files stored on
// the server. Log entries are returned in reverse chronological order. The
// following options are available:
// * "starttime" query parameter filters the log entries to only ones that
//   occurred on or after the "starttime". Defaults to a day ago.
// * "endtime" query parameter filters the log entries to only ones that
//   occurred on or before the "endtime". Defaults to the current time.
// * "pattern" query parameter filters the log entries by the provided regexp
//   pattern if it exists. Defaults to nil.
// * "max" query parameter is the hard limit of the number of returned log
//   entries. Defaults to defaultMaxLogEntries.
// * "level" query parameter filters the log entries to be those of the
//   corresponding severity level or worse. Defaults to "info".
func (s *statusServer) Logs(ctx context.Context, req *serverpb.LogsRequest) (*serverpb.JSONResponse, error) {
	log.Flush()

	var sev log.Severity
	if len(req.Level) == 0 {
		sev = log.InfoLog
	} else {
		var sevFound bool
		sev, sevFound = log.SeverityByName(req.Level)
		if !sevFound {
			return nil, fmt.Errorf("level could not be determined: %s", req.Level)
		}
	}

	startTimestamp, err := parseInt64WithDefault(
		req.StartTime,
		timeutil.Now().AddDate(0, 0, -1).UnixNano())
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "StartTime could not be parsed: %s", err)
	}

	endTimestamp, err := parseInt64WithDefault(req.EndTime, timeutil.Now().UnixNano())
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "EndTime could not be parsed: %s", err)
	}

	if startTimestamp > endTimestamp {
		return nil, grpc.Errorf(codes.InvalidArgument, "StartTime: %d should not be greater than EndTime: %d", startTimestamp, endTimestamp)
	}

	maxEntries, err := parseInt64WithDefault(req.Max, defaultMaxLogEntries)
	if err != nil {
		return nil, grpc.Errorf(codes.InvalidArgument, "Max could not be parsed: %s", err)
	}
	if maxEntries < 1 {
		return nil, grpc.Errorf(codes.InvalidArgument, "Max: %d should be set to a value greater than 0", maxEntries)
	}

	var regex *regexp.Regexp
	if len(req.Pattern) > 0 {
		if regex, err = regexp.Compile(req.Pattern); err != nil {
			return nil, grpc.Errorf(codes.InvalidArgument, "regex pattern could not be compiled: %s", err)
		}
	}

	entries, err := log.FetchEntriesFromFiles(sev, startTimestamp, endTimestamp, int(maxEntries), regex)
	if err != nil {
		return nil, err
	}

	return marshalJSONResponse(entries)
}
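A client exercises these parameters over HTTP. A hedged sketch using only the standard library (net/http, net/url, strconv, time); the /_status/logs/local path is an assumption modeled on the /_status/details/local endpoint in example 30, not something this handler defines:

func fetchWarningLogs(serverURL string) (*http.Response, error) {
	q := url.Values{}
	q.Set("level", "WARNING")
	q.Set("max", "500")
	// starttime/endtime are nanosecond timestamps, per the parsing above.
	q.Set("starttime", strconv.FormatInt(time.Now().AddDate(0, 0, -1).UnixNano(), 10))
	return http.Get(serverURL + "/_status/logs/local?" + q.Encode())
}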
Example #11
// Wait until all clients have stopped.
func waitClientsStop(num int, state *testState, stallDuration time.Duration) {
	prevRound := atomic.LoadUint64(&state.monkeyIteration)
	stallTime := timeutil.Now().Add(stallDuration)
	var prevOutput string
	// Spin until all clients have shut down.
	for numShutClients := 0; numShutClients < num; {
		select {
		case <-state.teardown:
		case <-stopper:
			state.t.Fatal("interrupted")

		case err := <-state.errChan:
			if err != nil {
				state.t.Error(err)
			}
			numShutClients++

		case <-time.After(time.Second):
			var newOutput string
			if timeutil.Now().Before(state.deadline) {
				curRound := atomic.LoadUint64(&state.monkeyIteration)
				if curRound == prevRound {
					if timeutil.Now().After(stallTime) {
						atomic.StoreInt32(&state.stalled, 1)
						state.t.Fatalf("Stall detected at round %d, no forward progress for %s", curRound, stallDuration)
					}
				} else {
					prevRound = curRound
					stallTime = timeutil.Now().Add(stallDuration)
				}
				// Periodically print out progress so that we know the test is
				// still running and making progress.
				counts := state.counts()
				strCounts := make([]string, len(counts))
				for i := range counts {
					strCounts[i] = strconv.FormatUint(counts[i], 10)
				}
				newOutput = fmt.Sprintf("round %d: client counts: (%s)", curRound, strings.Join(strCounts, ", "))
			} else {
				newOutput = fmt.Sprintf("test finished, waiting for shutdown of %d clients", num-numShutClients)
			}
			// This just stops the logs from being a bit too spammy.
			if newOutput != prevOutput {
				log.Infof("%s", newOutput)
				prevOutput = newOutput
			}
		}
	}
}
Example #12
// RetryForDuration will retry the given function until it either returns
// without error, or the given duration has elapsed. The function is invoked
// immediately at first and then successively with an exponential backoff
// starting at 1ns and capped at 1s.
func RetryForDuration(duration time.Duration, fn func() error) error {
	deadline := timeutil.Now().Add(duration)
	var lastErr error
	for wait := time.Duration(1); timeutil.Now().Before(deadline); wait *= 2 {
		lastErr = fn()
		if lastErr == nil {
			return nil
		}
		if wait > time.Second {
			wait = time.Second
		}
		time.Sleep(wait)
	}
	return lastErr
}
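A minimal usage sketch from inside a test; pingServer is an illustrative stand-in for any func() error probe:

func TestServerComesUp(t *testing.T) {
	// Retry for up to 5s, backing off exponentially between attempts.
	if err := RetryForDuration(5*time.Second, func() error {
		return pingServer() // hypothetical readiness probe
	}); err != nil {
		t.Fatal(err)
	}
}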
Example #13
func testPutInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, dbStopper := c.NewClient(t, 0)
	defer dbStopper.Stop()

	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)

			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}

	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(context.Background(), "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(t)
			cluster.Consistent(t, c)
		}
	}

	elapsed := timeutil.Since(start)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example #14
// stop stops the cluster.
func (l *LocalCluster) stop() {
	if *waitOnStop {
		log.Infof("waiting for interrupt")
		<-l.stopper
	}

	log.Infof("stopping")

	l.mu.Lock()
	defer l.mu.Unlock()

	if l.monitorCtxCancelFunc != nil {
		l.monitorCtxCancelFunc()
		l.monitorCtxCancelFunc = nil
	}

	if l.vols != nil {
		maybePanic(l.vols.Kill())
		maybePanic(l.vols.Remove())
		l.vols = nil
	}
	if l.CertsDir != "" {
		_ = os.RemoveAll(l.CertsDir)
		l.CertsDir = ""
	}
	outputLogDir := l.logDir
	for i, n := range l.Nodes {
		ci, err := n.Inspect()
		crashed := err != nil || (!ci.State.Running && ci.State.ExitCode != 0)
		maybePanic(n.Kill())
		if crashed && outputLogDir == "" {
			outputLogDir = util.CreateTempDir(util.PanicTester, "crashed_nodes")
		}
		if crashed || l.logDir != "" {
			// TODO(bdarnell): make these filenames more consistent with
			// structured logs?
			file := filepath.Join(outputLogDir, nodeStr(i),
				fmt.Sprintf("stderr.%s.log", strings.Replace(
					timeutil.Now().Format(time.RFC3339), ":", "_", -1)))

			maybePanic(os.MkdirAll(filepath.Dir(file), 0777))
			w, err := os.Create(file)
			maybePanic(err)
			defer w.Close()
			maybePanic(n.Logs(w))
			if crashed {
				log.Infof("node %d: stderr at %s", i, file)
			}
		}
		maybePanic(n.Remove())
	}
	l.Nodes = nil

	if l.networkID != "" {
		maybePanic(l.client.NetworkRemove(context.Background(), l.networkID))
		l.networkID = ""
	}
}
Example #15
func BenchmarkMVCCPutDelete_RocksDB(b *testing.B) {
	const cacheSize = 1 << 30 // 1 GB

	rocksdb, stopper := setupMVCCInMemRocksDB(b, "put_delete")
	defer stopper.Stop()

	r := rand.New(rand.NewSource(int64(timeutil.Now().UnixNano())))
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
	zeroTS := roachpb.ZeroTimestamp
	var blockNum int64

	for i := 0; i < b.N; i++ {
		blockID := r.Int63()
		blockNum++
		key := encoding.EncodeVarintAscending(nil, blockID)
		key = encoding.EncodeVarintAscending(key, blockNum)

		if err := MVCCPut(context.Background(), rocksdb, nil, key, zeroTS, value, nil /* txn */); err != nil {
			b.Fatal(err)
		}
		if err := MVCCDelete(context.Background(), rocksdb, nil, key, zeroTS, nil /* txn */); err != nil {
			b.Fatal(err)
		}
	}
}
Example #16
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(start time.Time, clock *hlc.Clock, stopper *stop.Stopper,
	repl *Replica) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof("Wait time interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}

			return !stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			})
		case repl := <-rs.removed:
			// Remove replica from all queues as applicable.
			for _, q := range rs.queues {
				q.MaybeRemove(repl)
			}
			if log.V(6) {
				log.Infof("removed replica %s", repl)
			}
		case <-stopper.ShouldStop():
			return true
		}
	}
}
Example #17
func (sp *StorePool) String() string {
	sp.mu.Lock()
	defer sp.mu.Unlock()

	ids := make(roachpb.StoreIDSlice, 0, len(sp.mu.stores))
	for id := range sp.mu.stores {
		ids = append(ids, id)
	}
	sort.Sort(ids)

	var buf bytes.Buffer
	now := timeutil.Now()

	for _, id := range ids {
		detail := sp.mu.stores[id]
		fmt.Fprintf(&buf, "%d", id)
		if detail.dead {
			_, _ = buf.WriteString("*")
		}
		fmt.Fprintf(&buf, ": range-count=%d fraction-used=%.2f",
			detail.desc.Capacity.RangeCount, detail.desc.Capacity.FractionUsed())
		throttled := detail.throttledUntil.Sub(now)
		if throttled > 0 {
			fmt.Fprintf(&buf, " [throttled=%.1fs]", throttled.Seconds())
		}
		_, _ = buf.WriteString("\n")
	}
	return buf.String()
}
Example #18
func BenchmarkMVCCPutDelete(b *testing.B) {
	const cacheSize = 1 << 30 // 1 GB

	stopper := stop.NewStopper()
	rocksdb := NewInMem(roachpb.Attributes{}, cacheSize, stopper)
	defer stopper.Stop()

	r := rand.New(rand.NewSource(int64(timeutil.Now().UnixNano())))
	value := roachpb.MakeValueFromBytes(randutil.RandBytes(r, 10))
	zeroTS := roachpb.ZeroTimestamp
	var blockNum int64

	for i := 0; i < b.N; i++ {
		blockID := r.Int63()
		blockNum++
		key := encoding.EncodeVarintAscending(nil, blockID)
		key = encoding.EncodeVarintAscending(key, blockNum)

		if err := MVCCPut(rocksdb, nil, key, zeroTS, value, nil /* txn */); err != nil {
			b.Fatal(err)
		}
		if err := MVCCDelete(rocksdb, nil, key, zeroTS, nil /* txn */); err != nil {
			b.Fatal(err)
		}
	}
}
Example #19
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
	start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
	waitInterval := rs.paceInterval(start, timeutil.Now())
	rs.waitTimer.Reset(waitInterval)
	if log.V(6) {
		log.Infof(context.TODO(), "wait timer interval set to %s", waitInterval)
	}
	for {
		select {
		case <-rs.waitTimer.C:
			if log.V(6) {
				log.Infof(context.TODO(), "wait timer fired")
			}
			rs.waitTimer.Read = true
			if repl == nil {
				return false
			}

			return nil != stopper.RunTask(func() {
				// Try adding replica to all queues.
				for _, q := range rs.queues {
					q.MaybeAdd(repl, clock.Now())
				}
			})

		case repl := <-rs.removed:
			rs.removeReplica(repl)

		case <-stopper.ShouldStop():
			return true
		}
	}
}
Example #20
func generateUniqueBytes(nodeID roachpb.NodeID) DBytes {
	// Unique bytes are composed of the current time in nanoseconds and the
	// node-id. If the nanosecond value is the same on two consecutive calls to
	// timeutil.Now() the nanoseconds value is incremented. The node-id is varint
	// encoded. Since node-ids are allocated consecutively starting at 1, the
	// node-id field will consume 1 or 2 bytes for any reasonably sized cluster.
	//
	// TODO(pmattis): Do we have to worry about persisting the nanoseconds value
	// periodically to avoid the clock ever going backwards (e.g. due to NTP
	// adjustment)?
	nanos := uint64(timeutil.Now().UnixNano())
	uniqueBytesState.Lock()
	if nanos <= uniqueBytesState.nanos {
		nanos = uniqueBytesState.nanos + 1
	}
	uniqueBytesState.nanos = nanos
	uniqueBytesState.Unlock()

	b := make([]byte, 0, 8+binary.MaxVarintLen32)
	b = encoding.EncodeUint64Ascending(b, nanos)
	// We use binary.PutUvarint instead of encoding.EncodeUvarint because the
	// former uses less space for values < 128 which is a common occurrence for
	// node IDs.
	n := binary.PutUvarint(b[len(b):len(b)+binary.MaxVarintLen32], uint64(nodeID))
	return DBytes(b[:len(b)+n])
}
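The layout can be decoded back out with the standard library (encoding/binary, errors). A sketch, assuming EncodeUint64Ascending writes the timestamp as 8 big-endian bytes (its ascending key ordering implies as much), followed by the uvarint written above:

func decodeUniqueBytes(b []byte) (nanos, nodeID uint64, err error) {
	if len(b) < 8 {
		return 0, 0, errors.New("too short for timestamp")
	}
	nanos = binary.BigEndian.Uint64(b[:8]) // assumed big-endian layout
	var n int
	nodeID, n = binary.Uvarint(b[8:])
	if n <= 0 {
		return 0, 0, errors.New("malformed node-id uvarint")
	}
	return nanos, nodeID, nil
}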
Example #21
// TestScannerPaceInterval tests that paceInterval returns the correct interval.
func TestScannerPaceInterval(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	durations := []time.Duration{
		30 * time.Millisecond,
		60 * time.Millisecond,
		500 * time.Millisecond,
	}
	// logErrorWhenNotCloseTo logs an error when the actual value is not
	// within 1ms of the expected value.
	logErrorWhenNotCloseTo := func(expected, actual time.Duration) {
		delta := 1 * time.Millisecond
		if actual < expected-delta || actual > expected+delta {
			t.Errorf("Expected duration %s, got %s", expected, actual)
		}
	}
	for _, duration := range durations {
		startTime := timeutil.Now()
		ranges := newTestRangeSet(count, t)
		s := newReplicaScanner(duration, 0, ranges)
		interval := s.paceInterval(startTime, startTime)
		logErrorWhenNotCloseTo(duration/count, interval)
		// An empty range set should yield the full duration as the interval.
		ranges = newTestRangeSet(0, t)
		s = newReplicaScanner(duration, 0, ranges)
		interval = s.paceInterval(startTime, startTime)
		logErrorWhenNotCloseTo(duration, interval)
		ranges = newTestRangeSet(count, t)
		s = newReplicaScanner(duration, 0, ranges)
		// Move the present forward by the full scan duration; no wait should remain.
		interval = s.paceInterval(startTime, startTime.Add(duration))
		logErrorWhenNotCloseTo(0, interval)
	}
}
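The three assertions pin down paceInterval's contract even though its body is not part of this listing. A hypothetical reconstruction consistent with them, dividing the remaining scan budget evenly across the replicas still to visit:

func paceInterval(scanInterval time.Duration, start, now time.Time, count int) time.Duration {
	// An empty replica set waits out the whole interval in one step.
	if count == 0 {
		return scanInterval
	}
	remaining := scanInterval - now.Sub(start)
	if remaining < 0 {
		remaining = 0 // already past the target; do not wait
	}
	return remaining / time.Duration(count)
}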
Example #22
// newTemplate returns a partially-filled template.
// It should be further populated based on whether the cert is for a CA or node.
func newTemplate(commonName string) (*x509.Certificate, error) {
	// Generate a random serial number.
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		return nil, err
	}

	notBefore := timeutil.Now().Add(validFrom)
	notAfter := notBefore.Add(validFor)

	cert := &x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			Organization: []string{"Cockroach"},
			CommonName:   commonName,
		},
		NotBefore: notBefore,
		NotAfter:  notAfter,

		KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
	}

	return cert, nil
}
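The template feeds straight into the standard library's x509.CreateCertificate. A self-signing sketch; the RSA key size is an illustrative choice:

func newSelfSignedCert(commonName string) ([]byte, error) {
	template, err := newTemplate(commonName)
	if err != nil {
		return nil, err
	}
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	// Passing the template as its own parent yields a self-signed cert.
	return x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
}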
Example #23
func (t *logicTest) success(file string) {
	t.progress++
	now := timeutil.Now()
	if now.Sub(t.lastProgress) >= 2*time.Second {
		t.lastProgress = now
		fmt.Printf("%s: %d\n", file, t.progress)
	}
}
Example #24
// newClient creates and returns a client struct.
func newClient(addr net.Addr) *client {
	return &client{
		createdAt: timeutil.Now(),
		addr:      addr,
		remoteHighWaterStamps: map[roachpb.NodeID]int64{},
		closer:                make(chan struct{}),
	}
}
Example #25
// If the current time is past the timestamp stored at `key`, run `f`.
// Before running `f`, the timestamp is updated forward by a small amount via
// a compare-and-swap to ensure at-most-one concurrent execution. After `f`
// executes the timestamp is set to the next execution time.
// Returns how long until `f` should be run next (i.e. when this method should
// be called again).
func (s *Server) maybeRunPeriodicCheck(op string, key roachpb.Key, f func()) time.Duration {
	resp, err := s.db.Get(key)
	if err != nil {
		log.Infof("Error reading %s time: %v", op, err)
		return updateCheckRetryFrequency
	}

	// We return early below if either the next check time is in the
	// future or the atomic compare-and-swap of that time failed (which
	// would happen if two nodes tried at the same time).
	if resp.Exists() {
		whenToCheck, pErr := resp.Value.GetTime()
		if pErr != nil {
			log.Warningf("Error decoding %s time: %v", op, pErr)
			return updateCheckRetryFrequency
		} else if delay := whenToCheck.Sub(timeutil.Now()); delay > 0 {
			return delay
		}

		nextRetry := whenToCheck.Add(updateCheckRetryFrequency)
		if err := s.db.CPut(key, nextRetry, whenToCheck); err != nil {
			if log.V(2) {
				log.Infof("Could not set next version check time (maybe another node checked?): %v", err)
			}
			return updateCheckRetryFrequency
		}
	} else {
		log.Infof("No previous %s time.", op)
		nextRetry := timeutil.Now().Add(updateCheckRetryFrequency)
		// CPut with `nil` prev value to assert that no other node has checked.
		if err := s.db.CPut(key, nextRetry, nil); err != nil {
			if log.V(2) {
				log.Infof("Could not set %s time (maybe another node checked?): %v", op, err)
			}
			return updateCheckRetryFrequency
		}
	}

	f()

	if err := s.db.Put(key, timeutil.Now().Add(updateCheckFrequency)); err != nil {
		log.Infof("Error updating %s time: %v", op, err)
	}
	return updateCheckFrequency
}
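A caller typically loops on the returned delay so the check re-arms itself. A hypothetical driver loop; updateCheckKey, s.checkForUpdates, and the stop channel are illustrative names, not part of this snippet:

func (s *Server) runUpdateCheckLoop(stop <-chan struct{}) {
	for {
		wait := s.maybeRunPeriodicCheck("update check", updateCheckKey, s.checkForUpdates)
		select {
		case <-stop:
			return
		case <-time.After(wait):
		}
	}
}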
Example #26
// scanLoop loops endlessly, scanning through replicas available via
// the replica set, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *replicaScanner) scanLoop(clock *hlc.Clock, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		start := timeutil.Now()

		// waitTimer is reset in each call to waitAndProcess.
		defer rs.waitTimer.Stop()

		for {
			if rs.GetDisabled() {
				if done := rs.waitEnabled(stopper); done {
					return
				}
				continue
			}
			var shouldStop bool
			count := 0
			rs.replicas.Visit(func(repl *Replica) bool {
				count++
				shouldStop = rs.waitAndProcess(start, clock, stopper, repl)
				return !shouldStop
			})
			if count == 0 {
				// No replicas processed, just wait.
				shouldStop = rs.waitAndProcess(start, clock, stopper, nil)
			}

			shouldStop = shouldStop || nil != stopper.RunTask(func() {
				// Increment iteration count.
				rs.mu.Lock()
				defer rs.mu.Unlock()
				rs.mu.scanCount++
				rs.mu.total += timeutil.Since(start)
				if log.V(6) {
					log.Infof(context.TODO(), "reset replica scan iteration")
				}

				// Reset iteration and start time.
				start = timeutil.Now()
			})
			if shouldStop {
				return
			}
		}
	})
}
Example #27
// newClient creates and returns a client struct.
func newClient(addr net.Addr, nodeMetrics metrics) *client {
	return &client{
		createdAt: timeutil.Now(),
		addr:      addr,
		remoteHighWaterStamps: map[roachpb.NodeID]int64{},
		closer:                make(chan struct{}),
		clientMetrics:         makeMetrics(metric.NewRegistry()),
		nodeMetrics:           nodeMetrics,
	}
}
Example #28
func TestLeaseManagerReacquire(testingT *testing.T) {
	defer leaktest.AfterTest(testingT)()
	ctx := server.MakeTestContext()
	var releaseWaiter leaseReleaseWaiter
	ctx.TestingKnobs = base.TestingKnobs{
		SQLLeaseManager: &csql.LeaseManagerTestingKnobs{
			LeaseStoreTestingKnobs: csql.LeaseStoreTestingKnobs{
				LeaseReleasedEvent: releaseWaiter.LeaseReleasedNotification,
			},
		},
	}
	t := newLeaseTest(testingT, &ctx)
	defer t.cleanup()

	const descID = keys.LeaseTableID

	// Acquire 2 leases from the same node. They should point to the same lease
	// structure.
	l1 := t.mustAcquire(1, descID, 0)
	l2 := t.mustAcquire(1, descID, 0)
	if l1 != l2 {
		t.Fatalf("expected same lease, but found %p != %p", l1, l2)
	}
	if l1.Refcount() != 2 {
		t.Fatalf("expected refcount of 2, but found %d", l1.Refcount())
	}
	t.expectLeases(descID, "/1/1")

	// Set the minimum lease duration such that the next lease acquisition will
	// require the lease to be reacquired.
	savedLeaseDuration, savedMinLeaseDuration := csql.LeaseDuration, csql.MinLeaseDuration
	defer func() {
		csql.LeaseDuration, csql.MinLeaseDuration = savedLeaseDuration, savedMinLeaseDuration
	}()
	csql.MinLeaseDuration = l1.Expiration().Sub(timeutil.Now())
	csql.LeaseDuration = 2 * csql.MinLeaseDuration

	// Another lease acquisition from the same node will result in a new lease.
	l3 := t.mustAcquire(1, descID, 0)
	if l1 == l3 {
		t.Fatalf("expected different leases, but found %p", l1)
	}
	if l3.Refcount() != 1 {
		t.Fatalf("expected refcount of 1, but found %d", l3.Refcount())
	}
	if l3.Expiration().Before(l1.Expiration()) {
		t.Fatalf("expected new lease expiration (%s) to be after old lease expiration (%s)",
			l3.Expiration(), l1.Expiration())
	}
	t.expectLeases(descID, "/1/1 /1/1")

	t.mustRelease(1, l1, nil)
	t.mustRelease(1, l2, &releaseWaiter)
	t.mustRelease(1, l3, nil)
}
Example #29
// monotonicUnixNano returns a monotonically increasing value for
// nanoseconds in Unix time. Since equal times are ignored with
// updates to infos, we're careful to avoid incorrectly ignoring a
// newly created value in the event one is created within the same
// nanosecond. Really unlikely except in unit tests, but
// better safe than sorry.
func monotonicUnixNano() int64 {
	monoTime.Lock()
	defer monoTime.Unlock()

	now := timeutil.Now().UnixNano()
	if now <= monoTime.last {
		now = monoTime.last + 1
	}
	monoTime.last = now
	return now
}
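The guarantee is easy to exercise: consecutive calls must strictly increase even if the wall clock reports the same nanosecond twice. An illustrative check in the style of the tests in this listing:

func TestMonotonicUnixNano(t *testing.T) {
	prev := monotonicUnixNano()
	for i := 0; i < 1000; i++ {
		next := monotonicUnixNano()
		if next <= prev {
			t.Fatalf("timestamp went backwards: %d <= %d", next, prev)
		}
		prev = next
	}
}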
Example #30
func testAdminLossOfQuorumInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	if c.NumNodes() < 2 {
		t.Logf("skipping test %s because given cluster has too few nodes", cfg.Name)
		return
	}

	// Get the ids for each node.
	nodeIDs := make([]roachpb.NodeID, c.NumNodes())
	for i := 0; i < c.NumNodes(); i++ {
		var details serverpb.DetailsResponse
		if err := util.GetJSON(cluster.HTTPClient, c.URL(i)+"/_status/details/local", &details); err != nil {
			t.Fatal(err)
		}
		nodeIDs[i] = details.NodeID
	}

	// Leave only the first node alive.
	for i := 1; i < c.NumNodes(); i++ {
		if err := c.Kill(i); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve node statuses.
	var nodes serverpb.NodesResponse
	if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes", &nodes); err != nil {
		t.Fatal(err)
	}

	for _, nodeID := range nodeIDs {
		var nodeStatus status.NodeStatus
		if err := util.GetJSON(cluster.HTTPClient, c.URL(0)+"/_status/nodes/"+strconv.Itoa(int(nodeID)), &nodeStatus); err != nil {
			t.Fatal(err)
		}
	}

	// Retrieve time-series data.
	nowNanos := timeutil.Now().UnixNano()
	queryRequest := tspb.TimeSeriesQueryRequest{
		StartNanos: nowNanos - 10*time.Second.Nanoseconds(),
		EndNanos:   nowNanos,
		Queries: []tspb.Query{
			{Name: "doesnt_matter", Sources: []string{}},
		},
	}
	var queryResponse tspb.TimeSeriesQueryResponse
	if err := util.PostJSON(cluster.HTTPClient, c.URL(0)+"/ts/query",
		&queryRequest, &queryResponse); err != nil {
		t.Fatal(err)
	}

	// TODO(cdo): When we're able to issue SQL queries without a quorum, test all
	// admin endpoints that issue SQL queries here.
}