Example #1
func TestOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()

	serverTime := time.Unix(0, 20)
	serverClock := hlc.NewClock(serverTime.UnixNano)
	serverCtx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Create a client clock that is behind the server clock.
	clientAdvancing := AdvancingClock{time: time.Unix(0, 10)}
	clientClock := hlc.NewClock(clientAdvancing.UnixNano)
	clientClock.SetMaxOffset(time.Millisecond)
	clientCtx := newNodeTestContext(clientClock, stopper)
	clientCtx.RemoteClocks.offsetTTL = 5 * clientAdvancing.getAdvancementInterval()
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}

	expectedOffset := RemoteOffset{Offset: 10, Uncertainty: 0, MeasuredAt: 10}
	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()

		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return errors.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		} else if o != expectedOffset {
			return errors.Errorf("expected:\n%v\nactual:\n%v", expectedOffset, o)
		}
		return nil
	})

	// Change the client such that it receives a heartbeat right after the
	// maximum clock reading delay.
	clientAdvancing.setAdvancementInterval(
		maximumPingDurationMult*clientClock.MaxOffset() + 1*time.Nanosecond)

	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()

		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return errors.Errorf("expected offset to have been cleared, but found %s", o)
		}
		return nil
	})
}
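
The test above depends on an AdvancingClock helper that is not shown in this listing. A minimal sketch of what such a helper could look like, assuming a mutex-guarded time value and an atomically stored advancement interval (the method names mirror the calls in the test; the body is an illustration, not necessarily the repository's code, and needs the sync, sync/atomic, and time imports):

type AdvancingClock struct {
	mu                  sync.Mutex
	time                time.Time
	advancementInterval atomic.Value // stores a time.Duration
}

func (ac *AdvancingClock) setAdvancementInterval(d time.Duration) {
	ac.advancementInterval.Store(d)
}

func (ac *AdvancingClock) getAdvancementInterval() time.Duration {
	if v := ac.advancementInterval.Load(); v != nil {
		return v.(time.Duration)
	}
	return 0
}

// UnixNano returns the current reading and then advances the clock by the
// configured interval, so each call moves time forward deterministically.
func (ac *AdvancingClock) UnixNano() int64 {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	now := ac.time
	ac.time = now.Add(ac.getAdvancementInterval())
	return now.UnixNano()
}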
Example #2
func TestClockOffsetMismatch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer func() {
		if r := recover(); r != nil {
			fmt.Println(r)
			if match, _ := regexp.MatchString("locally configured maximum clock offset", r.(string)); !match {
				t.Errorf("expected clock mismatch error")
			}
		}
	}()

	clock := hlc.NewClock(hlc.UnixNano, 250*time.Millisecond)
	hs := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
	}

	request := &PingRequest{
		Ping:           "testManual",
		Addr:           "test",
		MaxOffsetNanos: (500 * time.Millisecond).Nanoseconds(),
	}
	ctx := context.Background()
	_, _ = hs.Ping(ctx, request)
	t.Fatal("should not reach")
}
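
For context, the panic this test recovers from comes from a server-side sanity check in Ping: heartbeating between two nodes whose configured maximum clock offsets disagree would make the offset measurements meaningless. A rough sketch of such a check, with hypothetical helper name and message wording (only the quoted fragment matched above is confirmed by the test):

func checkMaxOffsetMatch(localMaxOffset time.Duration, req *PingRequest) {
	// The client advertises its configured max offset in the request; a
	// mismatch with the locally configured value is a fatal misconfiguration.
	if req.MaxOffsetNanos != localMaxOffset.Nanoseconds() {
		panic(fmt.Sprintf(
			"locally configured maximum clock offset (%s) does not match that of node %s (%s)",
			localMaxOffset, req.Addr, time.Duration(req.MaxOffsetNanos)))
	}
}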
Example #3
func TestAcquireAndRelease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	ctx := context.Background()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})

	l, err := lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); !testutils.IsError(err, "unexpected value") {
		t.Fatal(err)
	}

	l, err = lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}
}
Example #4
// TestTimestampSelectionInOptions verifies that a client can set the
// Txn timestamp using client.TxnExecOptions.
func TestTimestampSelectionInOptions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := NewDB(newTestSender(nil, nil))
	txn := NewTxn(context.Background(), *db)

	mc := hlc.NewManualClock(100)
	clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
	execOpt := TxnExecOptions{
		Clock: clock,
	}
	refTimestamp := clock.Now()

	txnClosure := func(txn *Txn, opt *TxnExecOptions) error {
		// Ensure the KV transaction is created.
		return txn.Put("a", "b")
	}

	if err := txn.Exec(execOpt, txnClosure); err != nil {
		t.Fatal(err)
	}

	// Check the timestamp was initialized.
	if txn.Proto.OrigTimestamp.WallTime != refTimestamp.WallTime {
		t.Errorf("expected txn orig ts to be %s; got %s", refTimestamp, txn.Proto.OrigTimestamp)
	}
}
Example #5
func TestHeartbeatReply(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(5)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	heartbeat := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
	}

	request := &PingRequest{
		Ping: "testPing",
	}
	response, err := heartbeat.Ping(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if response.Pong != request.Ping {
		t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
	}

	if response.ServerTime != 5 {
		t.Errorf("expected server time 5, instead %d", response.ServerTime)
	}
}
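
The two assertions map one-to-one onto how a heartbeat reply is presumably assembled: echo the ping payload and stamp the server's wall clock reading. A minimal sketch under those assumptions (hypothetical helper; the wall time is 5 here because the manual clock was seeded with 5):

func buildPingReply(clock *hlc.Clock, req *PingRequest) *PingResponse {
	return &PingResponse{
		Pong:       req.Ping,             // echoed payload
		ServerTime: clock.Now().WallTime, // server's current wall time
	}
}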
Example #6
// TestTimestampCacheEqualTimestamps verifies that in the event of two
// non-overlapping transactions with equal timestamps, the returned
// timestamp is not owned by either one.
func TestTimestampCacheEqualTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	tc := newTimestampCache(clock)

	txn1 := uuid.MakeV4()
	txn2 := uuid.MakeV4()

	// Add two non-overlapping transactions at the same timestamp.
	ts1 := clock.Now()
	tc.add(roachpb.Key("a"), roachpb.Key("b"), ts1, &txn1, true)
	tc.add(roachpb.Key("b"), roachpb.Key("c"), ts1, &txn2, true)

	// When querying either side separately, the transaction ID is returned.
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("b")); !ts.Equal(ts1) {
		t.Errorf("expected 'a'-'b' to have timestamp %s, but found %s", ts1, ts)
	} else if *txn != txn1 {
		t.Errorf("expected 'a'-'b' to have txn id %s, but found %s", txn1, txn)
	}
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("b"), roachpb.Key("c")); !ts.Equal(ts1) {
		t.Errorf("expected 'b'-'c' to have timestamp %s, but found %s", ts1, ts)
	} else if *txn != txn2 {
		t.Errorf("expected 'b'-'c' to have txn id %s, but found %s", txn2, txn)
	}

	// Querying a span that overlaps both returns a nil txn ID; neither
	// can proceed here.
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("c")); !ts.Equal(ts1) {
		t.Errorf("expected 'a'-'c' to have timestamp %s, but found %s", ts1, ts)
	} else if txn != nil {
		t.Errorf("expected 'a'-'c' to have nil txn id, but found %s", txn)
	}
}
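
One way to understand the nil result is as a fold over matching cache entries: a strictly newer timestamp installs its owner, while an equal timestamp owned by a different transaction clears the owner. A toy illustration of that rule (hypothetical helper, not the cache's actual lookup code):

// updateMax folds one cache entry into the running (maxTS, maxTxn) result of
// a GetMaxRead scan. When two distinct transactions own entries at the same
// maximum timestamp, ownership degenerates to nil and neither may proceed as
// if it owned the read.
func updateMax(maxTS, ts hlc.Timestamp, maxTxn, txn *uuid.UUID) (hlc.Timestamp, *uuid.UUID) {
	if maxTS.Less(ts) {
		return ts, txn
	}
	if ts.Equal(maxTS) && (maxTxn == nil || txn == nil || *maxTxn != *txn) {
		return maxTS, nil
	}
	return maxTS, maxTxn
}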
Example #7
File: network.go Project: hvaara/cockroach
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(stopper *stop.Stopper, nodeCount int, createResolvers bool) *Network {
	log.Infof(context.TODO(), "simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stopper,
	}
	n.rpcContext = rpc.NewContext(
		log.AmbientContext{},
		&base.Config{Insecure: true},
		hlc.NewClock(hlc.UnixNano, time.Nanosecond),
		n.Stopper,
	)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(context.TODO(), err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(context.TODO(), err)
		}
		// Build a resolver for each instance or we'll get data races.
		if createResolvers {
			r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr())
			if err != nil {
				log.Fatalf(context.TODO(), "bad gossip address %s: %s", n.Nodes[0].Addr(), err)
			}
			node.Gossip.SetResolvers([]resolver.Resolver{r})
		}
	}
	return n
}
Example #8
func TestReacquireLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	ctx := context.Background()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})

	l, err := lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}

	// We allow re-acquiring the same lease as long as the client ID is
	// the same to allow a client to reacquire its own leases rather than
	// having to wait them out if it crashes and restarts.
	l, err = lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}
}
Example #9
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	// Split the range.
	aDesc, bDesc, pErr := createSplitRanges(store)
	if pErr != nil {
		t.Fatal(pErr)
	}

	// Write some values left and right of the split key.
	writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))

	// Get the range stats for both ranges now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range A's stats before split: %v", err)
	}
	if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range B's stats before split: %v", err)
	}

	manual.Increment(100)

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
		t.Fatal(err)
	}
	replMerged := store.LookupReplica(aDesc.StartKey, nil)

	// Get the range stats for the merged range and verify.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	// Merged stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
		t.Errorf("failed to verify range's stats after merge: %v", err)
	}
}
Example #10
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch that contains the
// entire writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond)

	senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		txnClone := ba.Txn.Clone()
		br.Txn = &txnClone
		br.Txn.Writing = true
		return br, nil
	}
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	ts := NewTxnCoordSender(
		ambient, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics(metric.TestSampleInterval),
	)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example #11
func TestHeartbeatCB(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(time.Unix(0, 20).UnixNano, time.Nanosecond)
	serverCtx := newNodeTestContext(clock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Clocks don't matter in this test.
	clientCtx := newNodeTestContext(clock, stopper)

	var once sync.Once
	ch := make(chan struct{})

	clientCtx.HeartbeatCB = func() {
		once.Do(func() {
			close(ch)
		})
	}

	_, err := clientCtx.GRPCDial(remoteAddr)
	if err != nil {
		t.Fatal(err)
	}

	<-ch
}
Example #12
// TestScannerTiming verifies that the scanner paces a full pass over the
// ranges to match scanInterval, regardless of how many ranges there are.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		15 * time.Millisecond,
		25 * time.Millisecond,
	}
	for i, duration := range durations {
		testutils.SucceedsSoon(t, func() error {
			ranges := newTestRangeSet(count, t)
			q := &testQueue{}
			s := newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
			s.AddQueues(q)
			mc := hlc.NewManualClock(123)
			clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
			stopper := stop.NewStopper()
			s.Start(clock, stopper)
			time.Sleep(runTime)
			stopper.Stop()

			avg := s.avgScan()
			log.Infof(context.Background(), "%d: average scan: %s", i, avg)
			if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
				duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
				return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
			}
			return nil
		})
	}
}
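
The tolerance check at the end is just pacing arithmetic: to make one full pass over n replicas take about the target interval, the scanner has to wait roughly interval/n between replicas. A toy statement of that rule (hypothetical helper, not the scanner's implementation):

// pacingDelay returns the per-replica wait that stretches a complete scan of
// count replicas out to approximately targetInterval.
func pacingDelay(targetInterval time.Duration, count int) time.Duration {
	if count == 0 {
		return targetInterval
	}
	return targetInterval / time.Duration(count)
}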
Example #13
File: node_test.go Project: knz/cockroach
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}

	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	cfg.Gossip = gossip.NewTest(
		0,
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry(),
	)
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		cfg.Gossip.SetResolvers([]resolver.Resolver{r})
		cfg.Gossip.Start(ln.Addr())
	}
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, cfg.Gossip)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	sender := kv.NewTxnCoordSender(
		cfg.AmbientCtx,
		distSender,
		cfg.Clock,
		false,
		stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Example #14
func TestVerifyClockOffset(t *testing.T) {
	defer leaktest.AfterTest(t)()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 50*time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)

	for idx, tc := range []struct {
		offsets       []RemoteOffset
		expectedError bool
	}{
		// no error if no offsets.
		{[]RemoteOffset{}, false},
		// no error when a majority of offsets are under the maximum tolerated offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 48, Uncertainty: 20}, {Offset: 61, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, false},
		// error when less than a majority of offsets are under the maximum tolerated offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 58, Uncertainty: 20}, {Offset: 85, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, true},
	} {
		monitor.mu.offsets = make(map[string]RemoteOffset)
		for i, offset := range tc.offsets {
			monitor.mu.offsets[strconv.Itoa(i)] = offset
		}

		if tc.expectedError {
			if err := monitor.VerifyClockOffset(context.TODO()); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
				t.Errorf("%d: unexpected error %v", idx, err)
			}
		} else {
			if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
				t.Errorf("%d: unexpected error %s", idx, err)
			}
		}
	}
}
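
The two table cases pin down the rule being tested. A sketch consistent with them, assuming a measurement is healthy when its interval [Offset-Uncertainty, Offset+Uncertainty] overlaps [-maxOffset, +maxOffset], and that verification fails unless a strict majority of measurements are healthy (hypothetical helpers, simplified from whatever the monitor actually does):

func isHealthy(o RemoteOffset, maxOffset time.Duration) bool {
	lo := o.Offset - o.Uncertainty
	hi := o.Offset + o.Uncertainty
	return lo < maxOffset.Nanoseconds() && -maxOffset.Nanoseconds() < hi
}

func verifyOffsets(offsets []RemoteOffset, maxOffset time.Duration) error {
	healthy := 0
	for _, o := range offsets {
		if isHealthy(o, maxOffset) {
			healthy++
		}
	}
	// With maxOffset = 50ns: {20,10}, {48,20}, {61,25} are healthy, so the
	// first table case passes with 3 of 4; in the second, {85,25} and {91,31}
	// fail, and 2 of 4 is not a strict majority, so verification errors out.
	if len(offsets) > 0 && healthy*2 <= len(offsets) {
		return errors.Errorf("clock offset is greater than max offset on a majority of nodes")
	}
	return nil
}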
Example #15
func TestClockOffsetMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 20*time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)
	monitor.mu.offsets = map[string]RemoteOffset{
		"0": {
			Offset:      13,
			Uncertainty: 7,
			MeasuredAt:  6,
		},
	}

	if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
		t.Fatal(err)
	}

	if a, e := monitor.Metrics().ClockOffsetMeanNanos.Value(), int64(13); a != e {
		t.Errorf("mean %d != expected %d", a, e)
	}
	if a, e := monitor.Metrics().ClockOffsetStdDevNanos.Value(), int64(7); a != e {
		t.Errorf("stdDev %d != expected %d", a, e)
	}
}
Example #16
func TestStoresVisitStores(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ls := NewStores(log.AmbientContext{}, hlc.NewClock(hlc.UnixNano))
	numStores := 10
	for i := 0; i < numStores; i++ {
		ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}})
	}

	visit := make([]bool, numStores)
	err := ls.VisitStores(func(s *Store) error { visit[s.Ident.StoreID] = true; return nil })
	if err != nil {
		t.Errorf("unexpected error on visit: %s", err.Error())
	}

	for i, visited := range visit {
		if !visited {
			t.Errorf("store %d was not visited", i)
		}
	}

	errBoom := errors.New("boom")
	if err := ls.VisitStores(func(s *Store) error {
		return errBoom
	}); err != errBoom {
		t.Errorf("got unexpected error %v", err)
	}
}
Example #17
func TestTimestampCacheClear(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	tc := newTimestampCache(clock)

	key := roachpb.Key("a")

	ts := clock.Now()
	tc.add(key, nil, ts, nil, true)

	manual.Increment(5000000)

	expTS := clock.Now()
	// Clear the cache, which will reset the low water mark to
	// the current time.
	tc.Clear(expTS)

	// Fetching any keys should give current time.
	if rTS, _, ok := tc.GetMaxRead(key, nil); ok {
		t.Errorf("expected %s to have cleared timestamp", key)
	} else if !rTS.Equal(expTS) {
		t.Errorf("expected %s, got %s", rTS, expTS)
	}
}
Example #18
// TestTimestampCacheNoEviction verifies that even after
// the MinTSCacheWindow interval, if the cache has not hit
// its size threshold, it will not evict entries.
func TestTimestampCacheNoEviction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	tc := newTimestampCache(clock)

	// Increment time to the low water mark + 1.
	manual.Increment(1)
	aTS := clock.Now()
	tc.add(roachpb.Key("a"), nil, aTS, nil, true)
	tc.AddRequest(cacheRequest{
		reads:     []roachpb.Span{{Key: roachpb.Key("c")}},
		timestamp: aTS,
	})

	// Increment time by the MinTSCacheWindow and add another key.
	manual.Increment(MinTSCacheWindow.Nanoseconds())
	tc.add(roachpb.Key("b"), nil, clock.Now(), nil, true)
	tc.AddRequest(cacheRequest{
		reads:     []roachpb.Span{{Key: roachpb.Key("d")}},
		timestamp: clock.Now(),
	})

	// Verify that the cache still has all 4 entries (two adds, two requests);
	// nothing was evicted.
	if l, want := tc.len(), 4; l != want {
		t.Errorf("expected %d entries to remain, got %d", want, l)
	}
}
Example #19
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)

	testCases := []struct {
		roachpb.Error
		errMsg string
	}{
		{*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
		{*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
			TxnMeta: enginepb.TxnMeta{
				ID: uuid.NewV4(),
			}})), "failed to push"},
		{*roachpb.NewErrorf("testError"), "testError"},
	}
	for i, test := range testCases {
		func() {
			senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
				txn := ba.Txn.Clone()
				txn.Writing = true
				pErr := &roachpb.Error{}
				*pErr = test.Error
				pErr.SetTxn(&txn)
				return nil, pErr
			}
			ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
			ts := NewTxnCoordSender(
				ambient,
				senderFn(senderFunc),
				clock,
				false,
				stopper,
				MakeTxnMetrics(metric.TestSampleInterval),
			)

			var ba roachpb.BatchRequest
			key := roachpb.Key("test")
			ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
			ba.Add(&roachpb.EndTransactionRequest{})
			ba.Txn = &roachpb.Transaction{Name: "test"}
			_, pErr := ts.Send(context.Background(), ba)
			if !testutils.IsPError(pErr, test.errMsg) {
				t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
			}

			defer teardownHeartbeats(ts)
			ts.Lock()
			defer ts.Unlock()
			if len(ts.txns) != 1 {
				t.Errorf("%d: expected transaction to be tracked", i)
			}
		}()
	}
}
Example #20
func newInsecureRPCContext(stopper *stop.Stopper) *rpc.Context {
	return rpc.NewContext(
		log.AmbientContext{},
		&base.Config{Insecure: true},
		hlc.NewClock(hlc.UnixNano, time.Nanosecond),
		stopper,
	)
}
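
As a usage note, a hedged sketch of how this helper composes with the patterns elsewhere in this listing (illustrative only):

stopper := stop.NewStopper()
defer stopper.Stop()
rpcCtx := newInsecureRPCContext(stopper)
// Dial peers or register services on rpcCtx exactly as the other
// examples do, e.g. rpcCtx.GRPCDial(remoteAddr).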
Example #21
func TestLeasesMultipleClients(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	ctx := context.Background()
	manual1 := hlc.NewManualClock(123)
	clock1 := hlc.NewClock(manual1.UnixNano, time.Nanosecond)
	manual2 := hlc.NewManualClock(123)
	clock2 := hlc.NewClock(manual2.UnixNano, time.Nanosecond)
	lm1 := client.NewLeaseManager(db, clock1, client.LeaseManagerOptions{ClientID: clientID1})
	lm2 := client.NewLeaseManager(db, clock2, client.LeaseManagerOptions{ClientID: clientID2})

	l1, err := lm1.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	_, err = lm2.AcquireLease(ctx, leaseKey)
	if !testutils.IsError(err, "is not available until") {
		t.Fatalf("didn't get expected error trying to acquire already held lease: %v", err)
	}
	if _, ok := err.(*client.LeaseNotAvailableError); !ok {
		t.Fatalf("expected LeaseNotAvailableError, got %v", err)
	}

	// Ensure a lease can be "stolen" after it's expired.
	manual2.Increment(int64(client.DefaultLeaseDuration) + 1)
	l2, err := lm2.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}

	// lm1's clock indicates that its lease should still be valid, but it doesn't
	// own it anymore.
	manual1.Increment(int64(client.DefaultLeaseDuration) / 2)
	if err := lm1.ExtendLease(ctx, l1); !testutils.IsError(err, "out of sync with DB state") {
		t.Fatalf("didn't get expected error trying to extend expired lease: %v", err)
	}
	if err := lm1.ReleaseLease(ctx, l1); !testutils.IsError(err, "unexpected value") {
		t.Fatalf("didn't get expected error trying to release stolen lease: %v", err)
	}

	if err := lm2.ReleaseLease(ctx, l2); err != nil {
		t.Fatal(err)
	}
}
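
Several of these lease tests match errors with testutils.IsError. A minimal sketch of that matching style, assuming the expectation string is treated as a regular expression over the error's message (nil errors never match):

func isError(err error, re string) bool {
	if err == nil {
		return false
	}
	matched, matchErr := regexp.MatchString(re, err.Error())
	return matchErr == nil && matched
}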
Example #22
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	nc := &base.NodeIDContainer{}
	ambient.AddLogTag("n", nc)

	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}

	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
	ltc.Stopper.AddCloser(ltc.Eng)

	ltc.Stores = storage.NewStores(ambient, ltc.Clock)

	ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	cfg := storage.TestStoreConfig()
	if ltc.RangeRetryOptions != nil {
		cfg.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	cfg.AmbientCtx = ambient
	cfg.Clock = ltc.Clock
	cfg.DB = ltc.DB
	cfg.Gossip = ltc.Gossip
	cfg.Transport = transport
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	nc.Set(context.TODO(), nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example #23
func TestStoresAddStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ls := NewStores(log.AmbientContext{}, hlc.NewClock(hlc.UnixNano))
	store := Store{}
	ls.AddStore(&store)
	if !ls.HasStore(store.Ident.StoreID) {
		t.Errorf("expected local sender to contain storeID=%d", store.Ident.StoreID)
	}
	if ls.HasStore(store.Ident.StoreID + 1) {
		t.Errorf("expected local sender to not contain storeID=%d", store.Ident.StoreID+1)
	}
}
Example #24
// TestScannerDisabled verifies that disabling a scanner prevents
// replicas from being added to queues.
func TestScannerDisabled(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	ranges := newTestRangeSet(count, t)
	q := &testQueue{}
	s := newReplicaScanner(log.AmbientContext{}, 1*time.Millisecond, 0, ranges)
	s.AddQueues(q)
	mc := hlc.NewManualClock(123)
	clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
	stopper := stop.NewStopper()
	s.Start(clock, stopper)
	defer stopper.Stop()

	// Verify queue gets all ranges.
	testutils.SucceedsSoon(t, func() error {
		if q.count() != count {
			return errors.Errorf("expected %d replicas; have %d", count, q.count())
		}
		if s.scanCount() == 0 {
			return errors.Errorf("expected scanner count to increment")
		}
		return nil
	})

	lastWaitEnabledCount := s.waitEnabledCount()

	// Now, disable the scanner.
	s.SetDisabled(true)
	testutils.SucceedsSoon(t, func() error {
		if s.waitEnabledCount() == lastWaitEnabledCount {
			return errors.Errorf("expected scanner to stop when disabled")
		}
		return nil
	})

	lastScannerCount := s.scanCount()

	// Remove the replicas and verify the scanner still removes them while disabled.
	ranges.Visit(func(repl *Replica) bool {
		s.RemoveReplica(repl)
		return true
	})

	testutils.SucceedsSoon(t, func() error {
		if qc := q.count(); qc != 0 {
			return errors.Errorf("expected queue to be empty after replicas removed from scanner; got %d", qc)
		}
		return nil
	})
	if sc := s.scanCount(); sc != lastScannerCount {
		t.Errorf("expected scanner count to not increment: %d != %d", sc, lastScannerCount)
	}
}
Example #25
// TestUncertaintyRestart verifies that a transaction which finds a write in
// its near future will restart exactly once, meaning that it's made a note of
// that node's clock for its new timestamp.
func TestUncertaintyRestart(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const maxOffset = 250 * time.Millisecond
	dbCtx := client.DefaultDBContext()
	s := &localtestcluster.LocalTestCluster{
		Clock:     hlc.NewClock(hlc.UnixNano, maxOffset),
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	if err := disableOwnNodeCertain(s); err != nil {
		t.Fatal(err)
	}
	s.Manual.Increment(s.Clock.MaxOffset().Nanoseconds() + 1)

	var key = roachpb.Key("a")

	errChan := make(chan error)
	start := make(chan struct{})
	go func() {
		<-start
		errChan <- s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
			return txn.Put(key, "hi")
		})
	}()

	if err := s.DB.Txn(context.TODO(), func(txn *client.Txn) error {
		if txn.Proto.Epoch > 2 {
			t.Fatal("expected only one restart")
		}
		// Issue a read to pick a timestamp.
		if _, err := txn.Get(key.Next()); err != nil {
			t.Fatal(err)
		}
		if txn.Proto.Epoch == 0 {
			close(start) // let someone write into our future
			// when they're done, try to read
			if err := <-errChan; err != nil {
				t.Fatal(err)
			}
		}
		if _, err := txn.Get(key.Next()); err != nil {
			if _, ok := err.(*roachpb.ReadWithinUncertaintyIntervalError); !ok {
				t.Fatalf("unexpected error: %T: %s", err, err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
Example #26
// NewClient implements the Cluster interface.
func (l *LocalCluster) NewClient(ctx context.Context, i int) (*roachClient.DB, error) {
	rpcContext := rpc.NewContext(log.AmbientContext{}, &base.Config{
		User:       security.NodeUser,
		SSLCA:      filepath.Join(l.CertsDir, security.EmbeddedCACert),
		SSLCert:    filepath.Join(l.CertsDir, security.EmbeddedNodeCert),
		SSLCertKey: filepath.Join(l.CertsDir, security.EmbeddedNodeKey),
	}, hlc.NewClock(hlc.UnixNano, 0), l.stopper)
	conn, err := rpcContext.GRPCDial(l.Nodes[i].Addr(ctx, DefaultTCP).String())
	if err != nil {
		return nil, err
	}
	return roachClient.NewDB(roachClient.NewSender(conn)), nil
}
Example #27
// TestRangeCommandClockUpdate verifies that followers update their
// clocks when executing a command, even if the lease holder's clock is far
// in the future.
func TestRangeCommandClockUpdate(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const numNodes = 3
	var manuals []*hlc.ManualClock
	var clocks []*hlc.Clock
	for i := 0; i < numNodes; i++ {
		manuals = append(manuals, hlc.NewManualClock(1))
		clocks = append(clocks, hlc.NewClock(manuals[i].UnixNano))
		clocks[i].SetMaxOffset(100 * time.Millisecond)
	}
	mtc := &multiTestContext{clocks: clocks}
	mtc.Start(t, numNodes)
	defer mtc.Stop()
	mtc.replicateRange(1, 1, 2)

	// Advance the lease holder's clock ahead of the followers (by more than
	// MaxOffset but less than the range lease) and execute a command.
	manuals[0].Increment(int64(500 * time.Millisecond))
	incArgs := incrementArgs([]byte("a"), 5)
	ts := clocks[0].Now()
	if _, err := client.SendWrappedWith(context.Background(), rg1(mtc.stores[0]), roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
		t.Fatal(err)
	}

	// Wait for that command to execute on all the followers.
	util.SucceedsSoon(t, func() error {
		values := []int64{}
		for _, eng := range mtc.engines {
			val, _, err := engine.MVCCGet(context.Background(), eng, roachpb.Key("a"), clocks[0].Now(), true, nil)
			if err != nil {
				return err
			}
			values = append(values, mustGetInt(val))
		}
		if !reflect.DeepEqual(values, []int64{5, 5, 5}) {
			return errors.Errorf("expected (5, 5, 5), got %v", values)
		}
		return nil
	})

	// Verify that all the followers have accepted the clock update from
	// node 0 even though it comes from outside the usual max offset.
	now := clocks[0].Now()
	for i, clock := range clocks {
		// Only compare the WallTimes: it's normal for clock 0 to be a few logical ticks ahead.
		if clock.Now().WallTime < now.WallTime {
			t.Errorf("clock %d is behind clock 0: %s vs %s", i, clock.Now(), now)
		}
	}
}
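
The final loop checks the hybrid-logical-clock update rule: observing a command timestamp ahead of the local reading ratchets the local clock forward, so followers never lag a lease holder they have heard from. A toy statement of that rule on bare wall times (hypothetical helper; the real hlc.Clock also maintains a logical component):

// forwardWallTime returns the later of the local reading and an observed
// remote timestamp, which is how a follower catches up to node 0 above.
func forwardWallTime(local, observed int64) int64 {
	if observed > local {
		return observed
	}
	return local
}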
Example #28
func TestFailedOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()

	// Can't be zero because that'd be an empty offset.
	clock := hlc.NewClock(time.Unix(0, 1).UnixNano, time.Nanosecond)

	serverCtx := newNodeTestContext(clock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	heartbeat := &ManualHeartbeatService{
		clock:              clock,
		remoteClockMonitor: serverCtx.RemoteClocks,
		ready:              make(chan struct{}),
		stopper:            stopper,
	}
	RegisterHeartbeatServer(s, heartbeat)

	// Create a client that never receives a heartbeat after the first.
	clientCtx := newNodeTestContext(clock, stopper)
	// Increase the timeout so that failure arises from exceeding the maximum
	// clock reading delay, not the timeout.
	clientCtx.HeartbeatTimeout = 20 * clientCtx.HeartbeatInterval
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}
	heartbeat.ready <- struct{}{} // Allow one heartbeat for initialization.

	testutils.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()

		if _, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return errors.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		}
		return nil
	})

	testutils.SucceedsSoon(t, func() error {
		serverCtx.RemoteClocks.mu.Lock()
		defer serverCtx.RemoteClocks.mu.Unlock()

		if o, ok := serverCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return errors.Errorf("expected offset of %s to not be initialized, but it was: %v", remoteAddr, o)
		}
		return nil
	})
}
Example #29
// TestRetryableError verifies that Send returns a retryable error
// when it hits an RPC error.
func TestRetryableError(t *testing.T) {
	defer leaktest.AfterTest(t)()

	clientStopper := stop.NewStopper()
	defer clientStopper.Stop()
	clientContext := newNodeTestContext(hlc.NewClock(hlc.UnixNano, time.Nanosecond), clientStopper)

	serverStopper := stop.NewStopper()
	serverContext := newNodeTestContext(hlc.NewClock(hlc.UnixNano, time.Nanosecond), serverStopper)

	s, ln := newTestServer(t, serverContext)
	roachpb.RegisterInternalServer(s, Node(0))

	addr := ln.Addr().String()
	if _, err := clientContext.GRPCDial(addr); err != nil {
		t.Fatal(err)
	}
	// Wait until the client becomes healthy and shut down the server.
	util.SucceedsSoon(t, func() error {
		if !clientContext.IsConnHealthy(addr) {
			return errors.Errorf("client not yet healthy")
		}
		return nil
	})
	serverStopper.Stop()
	// Wait until the client becomes unhealthy.
	util.SucceedsSoon(t, func() error {
		if clientContext.IsConnHealthy(addr) {
			return errors.Errorf("client not yet unhealthy")
		}
		return nil
	})

	opts := SendOptions{ctx: context.Background()}
	if _, err := sendBatch(opts, []net.Addr{ln.Addr()}, clientContext); err == nil {
		t.Fatalf("Unexpected success")
	}
}
Example #30
func TestStoresRemoveStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ls := NewStores(log.AmbientContext{}, hlc.NewClock(hlc.UnixNano))

	storeID := roachpb.StoreID(89)

	ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: storeID}})

	ls.RemoveStore(&Store{Ident: roachpb.StoreIdent{StoreID: storeID}})

	if ls.HasStore(storeID) {
		t.Errorf("expted local sender to remove storeID=%d", storeID)
	}
}