Code Example #1
File: node_test.go Project: jmptrader/cockroach
// TestCorruptedClusterID verifies that a node fails to start when a
// store's cluster ID is empty.
func TestCorruptedClusterID(t *testing.T) {
	defer leaktest.AfterTest(t)()

	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	defer e.Close()
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Set the cluster ID to the empty UUID.
	sIdent := roachpb.StoreIdent{
		ClusterID: uuid.UUID{},
		NodeID:    1,
		StoreID:   1,
	}
	if err := engine.MVCCPutProto(context.Background(), e, nil, keys.StoreIdentKey(), hlc.ZeroTimestamp, nil, &sIdent); err != nil {
		t.Fatal(err)
	}

	engines := []engine.Engine{e}
	_, serverAddr, _, node, stopper := createTestNode(util.TestAddr, engines, nil, t)
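	// The node is expected to fail to start below, so the stopper is not
	// needed and can be stopped up front.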
	stopper.Stop()
	if err := node.start(context.Background(), serverAddr, engines, roachpb.Attributes{}, roachpb.Locality{}); !testutils.IsError(err, "unidentified store") {
		t.Errorf("unexpected error %v", err)
	}
}
Code Example #2
File: abort_cache_test.go Project: knz/cockroach
// createTestAbortCache creates an in-memory engine and
// returns an abort cache using the supplied Range ID.
func createTestAbortCache(
	t *testing.T, rangeID roachpb.RangeID, stopper *stop.Stopper,
) (*AbortCache, engine.Engine) {
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(eng)
	return NewAbortCache(rangeID), eng
}
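
A caller hands the helper a stopper that owns the engine's lifetime. The following is a hypothetical usage sketch; the test body is an assumption, not taken from the project:

// Hypothetical usage sketch for createTestAbortCache above; the test
// body is assumed, not copied from the source project.
func TestAbortCacheSketch(t *testing.T) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Range ID 1 is arbitrary; the cache is scoped to a single range.
	cache, eng := createTestAbortCache(t, 1, stopper)
	_ = cache // Exercise the cache here...
	_ = eng   // ...the engine is closed by the stopper, not by the test.
}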
Code Example #3
File: node_test.go Project: jmptrader/cockroach
// TestBootstrapNewStore starts a cluster with two unbootstrapped
// stores and verifies both stores are added and started.
func TestBootstrapNewStore(t *testing.T) {
	defer leaktest.AfterTest(t)()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Start a new node with two new stores which will require bootstrapping.
	engines := Engines([]engine.Engine{
		e,
		engine.NewInMem(roachpb.Attributes{}, 1<<20),
		engine.NewInMem(roachpb.Attributes{}, 1<<20),
	})
	defer engines.Close()
	_, _, node, stopper := createAndStartTestNode(
		util.TestAddr,
		engines,
		util.TestAddr,
		roachpb.Locality{},
		t,
	)
	defer stopper.Stop()

	// Non-initialized stores (in this case the new in-memory-based
	// store) will be bootstrapped by the node upon start. This happens
	// in a goroutine, so we'll have to wait a bit until we can find the
	// new node.
	util.SucceedsSoon(t, func() error {
		if n := node.stores.GetStoreCount(); n != 3 {
			return errors.Errorf("expected 3 stores but got %d", n)
		}
		return nil
	})

	// Check whether all stores are started properly.
	if err := node.stores.VisitStores(func(s *storage.Store) error {
		if !s.IsStarted() {
			return errors.Errorf("fail to start store: %s", s)
		}
		return nil
	}); err != nil {
		t.Error(err)
	}
}
Code Example #4
File: local_test_cluster.go Project: knz/cockroach
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to a maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	nc := &base.NodeIDContainer{}
	ambient.AddLogTag("n", nc)

	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}

	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
	ltc.Stopper.AddCloser(ltc.Eng)

	ltc.Stores = storage.NewStores(ambient, ltc.Clock)

	ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	cfg := storage.TestStoreConfig()
	if ltc.RangeRetryOptions != nil {
		cfg.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	cfg.AmbientCtx = ambient
	cfg.Clock = ltc.Clock
	cfg.DB = ltc.DB
	cfg.Gossip = ltc.Gossip
	cfg.Transport = transport
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	nc.Set(context.TODO(), nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
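
For reference, a test might drive this cluster roughly as follows. This is a hedged sketch: the baseCtx contents and the initSender implementation are assumptions, not shown in this listing.

// Hypothetical usage sketch; baseCtx and initSender are assumed.
ltc := &LocalTestCluster{}
ltc.Start(t, baseCtx, initSender)
defer ltc.Stop() // Stop() shuts the server down, per the comment above.

// Once Start returns, ltc.DB is ready for KV operations.
if err := ltc.DB.Put(context.TODO(), "a", "value"); err != nil {
	t.Fatalf("put failed: %s", err)
}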
Code Example #5
File: node_test.go Project: jmptrader/cockroach
// TestNodeJoinSelf verifies that an uninitialized node trying to join
// itself will fail.
func TestNodeJoinSelf(t *testing.T) {
	defer leaktest.AfterTest(t)()

	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	defer e.Close()
	engines := []engine.Engine{e}
	_, addr, _, node, stopper := createTestNode(util.TestAddr, engines, util.TestAddr, t)
	defer stopper.Stop()
	err := node.start(context.Background(), addr, engines, roachpb.Attributes{}, roachpb.Locality{})
	if err != errCannotJoinSelf {
		t.Fatalf("expected err %s; got %s", errCannotJoinSelf, err)
	}
}
Code Example #6
File: migration_test.go Project: knz/cockroach
func TestMigrate7310And6991(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<10)
	stopper.AddCloser(eng)

	desc := *testRangeDescriptor()

	if err := migrate7310And6991(context.Background(), eng, desc); err != nil {
		t.Fatal(err)
	}

	ts, err := loadTruncatedState(context.Background(), eng, desc.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	hs, err := loadHardState(context.Background(), eng, desc.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	rApplied, lApplied, err := loadAppliedIndex(context.Background(), eng, desc.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	expTS := roachpb.RaftTruncatedState{Term: raftInitialLogTerm, Index: raftInitialLogIndex}
	if expTS != ts {
		t.Errorf("expected %+v, got %+v", &expTS, &ts)
	}

	expHS := raftpb.HardState{Term: raftInitialLogTerm, Commit: raftInitialLogIndex}
	if !reflect.DeepEqual(expHS, hs) {
		t.Errorf("expected %+v, got %+v", &expHS, &hs)
	}

	expRApplied, expLApplied := uint64(raftInitialLogIndex), uint64(0)
	if expRApplied != rApplied || expLApplied != lApplied {
		t.Errorf("expected (raftApplied,leaseApplied)=(%d,%d), got (%d,%d)",
			expRApplied, expLApplied, rApplied, lApplied)
	}
}
Code Example #7
File: stores_test.go Project: hvaara/cockroach
// createStores creates a slice of count stores.
func createStores(count int, t *testing.T) (*hlc.ManualClock, []*Store, *Stores, *stop.Stopper) {
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(123)
	cfg := TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	ls := NewStores(log.AmbientContext{}, cfg.Clock)

	// Create count stores with ranges we care about.
	stores := []*Store{}
	for i := 0; i < count; i++ {
		cfg.Transport = NewDummyRaftTransport()
		eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
		stopper.AddCloser(eng)
		s := NewStore(cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
		storeIDAlloc++
		s.Ident.StoreID = storeIDAlloc
		stores = append(stores, s)
	}

	return manual, stores, ls, stopper
}
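
A minimal sketch of how a test could consume this helper (the body below is an assumption, not from the project):

// Hypothetical usage sketch for createStores above.
manual, stores, ls, stopper := createStores(2, t)
defer stopper.Stop()
for _, s := range stores {
	ls.AddStore(s)
}
_ = manual // Advance via manual.Increment(...) to drive clock-dependent code.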
Code Example #8
File: node_test.go Project: jmptrader/cockroach
// TestBootstrapCluster verifies the results of bootstrapping a
// cluster. Uses an in-memory engine.
func TestBootstrapCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(e)
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Scan the complete contents of the local database directly from the engine.
	rows, _, _, err := engine.MVCCScan(context.Background(), e, keys.LocalMax, roachpb.KeyMax, math.MaxInt64, hlc.MaxTimestamp, true, nil)
	if err != nil {
		t.Fatal(err)
	}
	var foundKeys keySlice
	for _, kv := range rows {
		foundKeys = append(foundKeys, kv.Key)
	}
	var expectedKeys = keySlice{
		testutils.MakeKey(roachpb.Key("\x02"), roachpb.KeyMax),
		testutils.MakeKey(roachpb.Key("\x03"), roachpb.KeyMax),
		roachpb.Key("\x04node-idgen"),
		roachpb.Key("\x04store-idgen"),
	}
	// Add the initial keys for sql.
	for _, kv := range GetBootstrapSchema().GetInitialValues() {
		expectedKeys = append(expectedKeys, kv.Key)
	}
	// Re-sort the list. The sql values are not sorted.
	sort.Sort(expectedKeys)

	if !reflect.DeepEqual(foundKeys, expectedKeys) {
		t.Errorf("expected keys mismatch:\n%s\n  -- vs. -- \n\n%s",
			formatKeys(foundKeys), formatKeys(expectedKeys))
	}

	// TODO(spencer): check values.
}
Code Example #9
File: config.go Project: knz/cockroach
// CreateEngines creates Engines based on the specs in ctx.Stores.
func (cfg *Config) CreateEngines() (Engines, error) {
	engines := Engines(nil)
	defer engines.Close()

	if cfg.enginesCreated {
		return Engines{}, errors.Errorf("engines already created")
	}
	cfg.enginesCreated = true

	cache := engine.NewRocksDBCache(cfg.CacheSize)
	defer cache.Release()

	var physicalStores int
	for _, spec := range cfg.Stores.Specs {
		if !spec.InMemory {
			physicalStores++
		}
	}
	openFileLimitPerStore, err := setOpenFileLimit(physicalStores)
	if err != nil {
		return Engines{}, err
	}

	skipSizeCheck := cfg.TestingKnobs.Store != nil &&
		cfg.TestingKnobs.Store.(*storage.StoreTestingKnobs).SkipMinSizeCheck
	for _, spec := range cfg.Stores.Specs {
		var sizeInBytes = spec.SizeInBytes
		if spec.InMemory {
			if spec.SizePercent > 0 {
				sysMem, err := GetTotalMemory()
				if err != nil {
					return Engines{}, errors.Errorf("could not retrieve system memory")
				}
				sizeInBytes = int64(float64(sysMem) * spec.SizePercent / 100)
			}
			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
				return Engines{}, errors.Errorf("%f%% of memory is only %s bytes, which is below the minimum requirement of %s",
					spec.SizePercent, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
			}
			engines = append(engines, engine.NewInMem(spec.Attributes, sizeInBytes))
		} else {
			if spec.SizePercent > 0 {
				fileSystemUsage := gosigar.FileSystemUsage{}
				if err := fileSystemUsage.Get(spec.Path); err != nil {
					return Engines{}, err
				}
				sizeInBytes = int64(float64(fileSystemUsage.Total) * spec.SizePercent / 100)
			}
			if sizeInBytes != 0 && !skipSizeCheck && sizeInBytes < base.MinimumStoreSize {
				return Engines{}, errors.Errorf("%f%% of %s's total free space is only %s bytes, which is below the minimum requirement of %s",
					spec.SizePercent, spec.Path, humanizeutil.IBytes(sizeInBytes), humanizeutil.IBytes(base.MinimumStoreSize))
			}

			eng, err := engine.NewRocksDB(
				spec.Attributes,
				spec.Path,
				cache,
				sizeInBytes,
				openFileLimitPerStore,
			)
			if err != nil {
				return Engines{}, err
			}
			engines = append(engines, eng)
		}
	}

	if len(engines) == 1 {
		log.Infof(context.TODO(), "1 storage engine initialized")
	} else {
		log.Infof(context.TODO(), "%d storage engines initialized", len(engines))
	}
	enginesCopy := engines
	engines = nil
	return enginesCopy, nil
}
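
Note the cleanup idiom framing this function: the defer engines.Close() at the top guarantees that any engines created before an error return are released, and setting engines = nil just before the successful return disarms that deferred Close, transferring ownership to the caller. A stripped-down sketch of the same pattern, with hypothetical stand-in names:

// Hypothetical sketch of the ownership-transfer idiom used by
// CreateEngines; Resource and the open callback are stand-ins.
type Resource interface{ Close() }

func buildAll(open func(int) (Resource, error), n int) ([]Resource, error) {
	var resources []Resource
	defer func() {
		// Runs on every return; a nil slice makes this a no-op on success.
		for _, r := range resources {
			r.Close()
		}
	}()
	for i := 0; i < n; i++ {
		r, err := open(i)
		if err != nil {
			return nil, err // Resources opened so far are closed by the defer.
		}
		resources = append(resources, r)
	}
	out := resources
	resources = nil // Disarm the deferred cleanup; the caller now owns out.
	return out, nil
}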
Code Example #10
// TestTxnPutOutOfOrder tests a case where a put operation of an older
// timestamp comes after a put operation of a newer timestamp in a
// txn. The test ensures such an out-of-order put succeeds and
// overrides an old value. The test uses a "Writer" and a "Reader"
// to reproduce an out-of-order put.
//
// 1) The Writer executes a put operation and writes a write intent with
//    time T in a txn.
// 2) Before the Writer's txn is committed, the Reader sends a high priority
//    get operation with time T+100. This pushes the Writer txn timestamp to
//    T+100 and triggers the restart of the Writer's txn. The original
//    write intent timestamp is also updated to T+100.
// 3) The Writer starts a new epoch of the txn, but before it writes, the
//    Reader sends another high priority get operation with time T+200. This
//    pushes the Writer txn timestamp to T+200 to trigger a restart of the
//    Writer txn. The Writer will not actually restart until it tries to commit
//    the current epoch of the transaction. The Reader updates the timestamp of
//    the write intent to T+200. The test deliberately fails the Reader get
//    operation, and cockroach doesn't update its read timestamp cache.
// 4) The Writer executes the put operation again. This put operation comes
//    out-of-order since its timestamp is T+100, while the intent timestamp
//    updated at Step 3 is T+200.
// 5) The put operation overrides the old value using timestamp T+100.
// 6) When the Writer attempts to commit its txn, the txn will be restarted
//    again at a new epoch timestamp T+200, which will finally succeed.
func TestTxnPutOutOfOrder(t *testing.T) {
	defer leaktest.AfterTest(t)()

	const key = "key"
	// Set up a filter so that the get operation at Step 3 returns an error.
	var numGets int32

	stopper := stop.NewStopper()
	defer stopper.Stop()
	manual := hlc.NewManualClock(123)
	cfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	cfg.TestingKnobs.TestingCommandFilter =
		func(filterArgs storagebase.FilterArgs) *roachpb.Error {
			if _, ok := filterArgs.Req.(*roachpb.GetRequest); ok &&
				filterArgs.Req.Header().Key.Equal(roachpb.Key(key)) &&
				filterArgs.Hdr.Txn == nil {
				// The Reader executes two get operations, each of which triggers two get requests
				// (the first request fails and triggers a txn push, and then the second request
				// succeeds). Return an error for the fourth get request to avoid a timestamp cache
				// update after the third get operation pushes the txn timestamp.
				if atomic.AddInt32(&numGets, 1) == 4 {
					return roachpb.NewErrorWithTxn(errors.Errorf("Test"), filterArgs.Hdr.Txn)
				}
			}
			return nil
		}
	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20)
	stopper.AddCloser(eng)
	store := createTestStoreWithEngine(t,
		eng,
		true,
		cfg,
		stopper,
	)

	// Put an initial value.
	initVal := []byte("initVal")
	err := store.DB().Put(context.TODO(), key, initVal)
	if err != nil {
		t.Fatalf("failed to put: %s", err)
	}

	waitPut := make(chan struct{})
	waitFirstGet := make(chan struct{})
	waitTxnRestart := make(chan struct{})
	waitSecondGet := make(chan struct{})
	waitTxnComplete := make(chan struct{})

	// Start the Writer.
	go func() {
		epoch := -1
		// Start a txn that does read-after-write.
		// The txn will be restarted twice, and the out-of-order put
		// will happen in the second epoch.
		if err := store.DB().Txn(context.TODO(), func(txn *client.Txn) error {
			epoch++

			if epoch == 1 {
				// Wait until the second get operation is issued.
				close(waitTxnRestart)
				<-waitSecondGet
			}

			updatedVal := []byte("updatedVal")
			if err := txn.Put(key, updatedVal); err != nil {
				return err
			}

			// Make sure a get will return the value that was just written.
			actual, err := txn.Get(key)
			if err != nil {
				return err
			}
			if !bytes.Equal(actual.ValueBytes(), updatedVal) {
				t.Fatalf("unexpected get result: %s", actual)
			}

			if epoch == 0 {
				// Wait until the first get operation pushes the txn timestamp.
				close(waitPut)
				<-waitFirstGet
			}

			b := txn.NewBatch()
			return txn.CommitInBatch(b)
		}); err != nil {
			t.Fatal(err)
		}

		if epoch != 2 {
			t.Fatalf("unexpected number of txn retries: %d", epoch)
		}

		close(waitTxnComplete)
	}()

	<-waitPut

	// Start the Reader.

	// Advance the clock and send a get operation with higher
	// priority to trigger the txn restart.
	manual.Increment(100)

	priority := roachpb.UserPriority(-math.MaxInt32)
	requestHeader := roachpb.Span{
		Key: roachpb.Key(key),
	}
	if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
		Timestamp:    cfg.Clock.Now(),
		UserPriority: priority,
	}, &roachpb.GetRequest{Span: requestHeader}); err != nil {
		t.Fatalf("failed to get: %s", err)
	}

	// Wait until the writer restarts the txn.
	close(waitFirstGet)
	<-waitTxnRestart

	// Advance the clock and send a get operation again. This time
	// we use TestingCommandFilter so that a get operation is not
	// processed after the write intent is resolved (to prevent the
	// timestamp cache from being updated).
	manual.Increment(100)

	if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
		Timestamp:    cfg.Clock.Now(),
		UserPriority: priority,
	}, &roachpb.GetRequest{Span: requestHeader}); err == nil {
		t.Fatal("unexpected success of get")
	}

	close(waitSecondGet)
	<-waitTxnComplete
}
Code Example #11
File: stores_test.go Project: knz/cockroach
func TestStoresLookupReplica(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	cfg := TestStoreConfig()
	manualClock := hlc.NewManualClock(0)
	cfg.Clock = hlc.NewClock(manualClock.UnixNano)
	ls := NewStores(log.AmbientContext{}, cfg.Clock)

	// Create two new stores with ranges we care about.
	var e [2]engine.Engine
	var s [2]*Store
	var d [2]*roachpb.RangeDescriptor
	ranges := []struct {
		storeID    roachpb.StoreID
		start, end roachpb.RKey
	}{
		{2, roachpb.RKeyMin, roachpb.RKey("c")},
		{3, roachpb.RKey("x"), roachpb.RKey("z")},
	}
	for i, rng := range ranges {
		e[i] = engine.NewInMem(roachpb.Attributes{}, 1<<20)
		stopper.AddCloser(e[i])
		cfg.Transport = NewDummyRaftTransport()
		s[i] = NewStore(cfg, e[i], &roachpb.NodeDescriptor{NodeID: 1})
		s[i].Ident.StoreID = rng.storeID

		d[i] = &roachpb.RangeDescriptor{
			RangeID:  roachpb.RangeID(i),
			StartKey: rng.start,
			EndKey:   rng.end,
			Replicas: []roachpb.ReplicaDescriptor{{StoreID: rng.storeID}},
		}
		newRng, err := NewReplica(d[i], s[i], 0)
		if err != nil {
			t.Fatal(err)
		}
		if err := s[i].AddReplica(newRng); err != nil {
			t.Error(err)
		}
		ls.AddStore(s[i])
	}

	testCases := []struct {
		start, end roachpb.RKey
		expStoreID roachpb.StoreID
		expError   string
	}{
		{
			start:      roachpb.RKey("a"),
			end:        roachpb.RKey("c"),
			expStoreID: s[0].Ident.StoreID,
		},
		{
			start:      roachpb.RKey("b"),
			end:        nil,
			expStoreID: s[0].Ident.StoreID,
		},
		{
			start:    roachpb.RKey("b"),
			end:      roachpb.RKey("d"),
			expError: "outside of bounds of range",
		},
		{
			start:      roachpb.RKey("x"),
			end:        roachpb.RKey("z"),
			expStoreID: s[1].Ident.StoreID,
		},
		{
			start:      roachpb.RKey("y"),
			end:        nil,
			expStoreID: s[1].Ident.StoreID,
		},
		{
			start:    roachpb.RKey("z1"),
			end:      roachpb.RKey("z2"),
			expError: "range 0 was not found",
		},
	}
	for testIdx, tc := range testCases {
		_, r, err := ls.LookupReplica(tc.start, tc.end)
		if tc.expError != "" {
			if !testutils.IsError(err, tc.expError) {
				t.Errorf("%d: got error %v (expected %s)", testIdx, err, tc.expError)
			}
		} else if err != nil {
			t.Errorf("%d: %s", testIdx, err)
		} else if r.StoreID != tc.expStoreID {
			t.Errorf("%d: expected store %d; got %d", testIdx, tc.expStoreID, r.StoreID)
		}
	}

	if desc, err := ls.FirstRange(); err != nil {
		t.Error(err)
	} else if !reflect.DeepEqual(desc, d[0]) {
		t.Fatalf("expected first range %+v; got %+v", desc, d[0])
	}
}
Code Example #12
File: node_test.go Project: jmptrader/cockroach
// TestStartNodeWithLocality creates a new node and store and starts them with a
// collection of different localities.
func TestStartNodeWithLocality(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testLocalityWithNewNode := func(locality roachpb.Locality) {
		e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
		defer e.Close()
		if _, err := bootstrapCluster(
			storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
		); err != nil {
			t.Fatal(err)
		}
		_, _, node, stopper := createAndStartTestNode(
			util.TestAddr,
			[]engine.Engine{e},
			util.TestAddr,
			locality,
			t,
		)
		defer stopper.Stop()

		// Check the node to make sure the locality was propagated to its
		// nodeDescriptor.
		if !reflect.DeepEqual(node.Descriptor.Locality, locality) {
			t.Fatalf("expected node locality to be %s, but it was %s", locality, node.Descriptor.Locality)
		}

		// Check the store to make sure the locality was propagated to its
		// nodeDescriptor.
		if err := node.stores.VisitStores(func(store *storage.Store) error {
			desc, err := store.Descriptor()
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(desc.Node.Locality, locality) {
				t.Fatalf("expected store's node locality to be %s, but it was %s", locality, desc.Node.Locality)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	testCases := []roachpb.Locality{
		{},
		{
			Tiers: []roachpb.Tier{
				{Key: "a", Value: "b"},
			},
		},
		{
			Tiers: []roachpb.Tier{
				{Key: "a", Value: "b"},
				{Key: "c", Value: "d"},
				{Key: "e", Value: "f"},
			},
		},
	}

	for _, testCase := range testCases {
		testLocalityWithNewNode(testCase)
	}
}
Code Example #13
File: node_test.go Project: jmptrader/cockroach
// TestNodeJoin verifies a new node is able to join a bootstrapped
// cluster consisting of one node.
func TestNodeJoin(t *testing.T) {
	defer leaktest.AfterTest(t)()
	engineStopper := stop.NewStopper()
	defer engineStopper.Stop()
	e := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	engineStopper.AddCloser(e)
	if _, err := bootstrapCluster(
		storage.StoreConfig{}, []engine.Engine{e}, kv.MakeTxnMetrics(metric.TestSampleInterval),
	); err != nil {
		t.Fatal(err)
	}

	// Start the bootstrap node.
	engines1 := []engine.Engine{e}
	_, server1Addr, node1, stopper1 := createAndStartTestNode(
		util.TestAddr,
		engines1,
		util.TestAddr,
		roachpb.Locality{},
		t,
	)
	defer stopper1.Stop()

	// Create a new node.
	e2 := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	engineStopper.AddCloser(e2)
	engines2 := []engine.Engine{e2}
	_, server2Addr, node2, stopper2 := createAndStartTestNode(
		util.TestAddr,
		engines2,
		server1Addr,
		roachpb.Locality{},
		t,
	)
	defer stopper2.Stop()

	// Verify new node is able to bootstrap its store.
	util.SucceedsSoon(t, func() error {
		if sc := node2.stores.GetStoreCount(); sc != 1 {
			return errors.Errorf("GetStoreCount() expected 1; got %d", sc)
		}
		return nil
	})

	// Verify node1 sees node2 via gossip and vice versa.
	node1Key := gossip.MakeNodeIDKey(node1.Descriptor.NodeID)
	node2Key := gossip.MakeNodeIDKey(node2.Descriptor.NodeID)
	util.SucceedsSoon(t, func() error {
		var nodeDesc1 roachpb.NodeDescriptor
		if err := node1.storeCfg.Gossip.GetInfoProto(node2Key, &nodeDesc1); err != nil {
			return err
		}
		if addr2Str, server2AddrStr := nodeDesc1.Address.String(), server2Addr.String(); addr2Str != server2AddrStr {
			return errors.Errorf("addr2 gossip %s doesn't match addr2 address %s", addr2Str, server2AddrStr)
		}
		var nodeDesc2 roachpb.NodeDescriptor
		if err := node2.storeCfg.Gossip.GetInfoProto(node1Key, &nodeDesc2); err != nil {
			return err
		}
		if addr1Str, server1AddrStr := nodeDesc2.Address.String(), server1Addr.String(); addr1Str != server1AddrStr {
			return errors.Errorf("addr1 gossip %s doesn't match addr1 address %s", addr1Str, server1AddrStr)
		}
		return nil
	})
}
Code Example #14
File: replica_state_test.go Project: knz/cockroach
func TestSynthesizeHardState(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	eng := engine.NewInMem(roachpb.Attributes{}, 1<<20)
	stopper.AddCloser(eng)

	tHS := raftpb.HardState{Term: 2, Vote: 3, Commit: 4}

	testCases := []struct {
		TruncTerm, RaftAppliedIndex uint64
		OldHS                       *raftpb.HardState
		NewHS                       raftpb.HardState
		Err                         string
	}{
		{OldHS: nil, TruncTerm: 42, RaftAppliedIndex: 24, NewHS: raftpb.HardState{Term: 42, Vote: 0, Commit: 24}},
		// Can't wind back the committed index of the new HardState.
		{OldHS: &tHS, RaftAppliedIndex: tHS.Commit - 1, Err: "can't decrease HardState.Commit"},
		{OldHS: &tHS, RaftAppliedIndex: tHS.Commit, NewHS: tHS},
		{OldHS: &tHS, RaftAppliedIndex: tHS.Commit + 1, NewHS: raftpb.HardState{Term: tHS.Term, Vote: 3, Commit: tHS.Commit + 1}},
		// Higher Term is picked up, but vote isn't carried over when the term
		// changes.
		{OldHS: &tHS, RaftAppliedIndex: tHS.Commit, TruncTerm: 11, NewHS: raftpb.HardState{Term: 11, Vote: 0, Commit: tHS.Commit}},
	}

	for i, test := range testCases {
		func() {
			batch := eng.NewBatch()
			defer batch.Close()
			testState := storagebase.ReplicaState{
				Desc:             testRangeDescriptor(),
				TruncatedState:   &roachpb.RaftTruncatedState{Term: test.TruncTerm},
				RaftAppliedIndex: test.RaftAppliedIndex,
			}

			if test.OldHS != nil {
				if err := setHardState(context.Background(), batch, testState.Desc.RangeID, *test.OldHS); err != nil {
					t.Fatal(err)
				}
			}

			oldHS, err := loadHardState(context.Background(), batch, testState.Desc.RangeID)
			if err != nil {
				t.Fatal(err)
			}

			if err := synthesizeHardState(context.Background(), batch, testState, oldHS); !(test.Err == "" && err == nil || testutils.IsError(err, test.Err)) {
				t.Fatalf("%d: expected %s got %v", i, test.Err, err)
			} else if err != nil {
				// No further checking if we expected an error and got it.
				return
			}

			hs, err := loadHardState(context.Background(), batch, testState.Desc.RangeID)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(hs, test.NewHS) {
				t.Fatalf("%d: expected %+v, got %+v", i, &test.NewHS, &hs)
			}
		}()
	}
}