Example #1
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)
	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
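
// Usage sketch (an assumption, not from the original source): per the doc
// comment above, a test starts the cluster, talks to it through ltc.DB, and
// defers Stop() so the stopper and in-memory store shut down with the test.
func TestLocalTestClusterUsageSketch(t *testing.T) {
	ltc := &LocalTestCluster{}
	ltc.Start(t)
	defer ltc.Stop()
	// ltc.DB is wired to the single in-memory store; the key and value below
	// are placeholders.
	if err := ltc.DB.Put("sketch-key", "sketch-value"); err != nil {
		t.Fatal(err)
	}
}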
Example #2
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {

	nodeDesc := &proto.NodeDescriptor{NodeID: 1}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20, ltc.Stopper)

	ltc.localSender = NewLocalSender()
	var rpcSend rpcSendFn = func(_ rpc.Options, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) gogoproto.Message, getReply func() gogoproto.Message,
		_ *rpc.Context) ([]gogoproto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		br, pErr := ltc.localSender.Send(context.Background(), *getArgs(nil).(*proto.BatchRequest))
		if br == nil {
			br = &proto.BatchResponse{}
		}
		if br.Error != nil {
			panic(proto.ErrorUnexpectedlySet(ltc.localSender, br))
		}
		br.Error = pErr
		return []gogoproto.Message{br}, nil
	}
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &defaultRPCRetryOptions,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,         // defined above
		RangeDescriptorDB:        ltc.localSender, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.localSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
Example #3
func createTestDBWithContext(
	t testing.TB, dbCtx client.DBContext,
) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	s := &localtestcluster.LocalTestCluster{
		DBContext: &dbCtx,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s, s.Sender.(*TxnCoordSender)
}
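
// Hedged usage sketch: a test builds its own client.DBContext (for example
// tightening the txn retry options, much like checkConcurrency in Example #9
// below) and remains responsible for stopping the cluster. The MaxRetries
// value here is purely illustrative.
func TestCustomDBContextSketch(t *testing.T) {
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.MaxRetries = 1
	s, sender := createTestDBWithContext(t, dbCtx)
	defer s.Stop()
	_ = sender // the *TxnCoordSender is handed back for direct inspection
}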
Example #4
// newNodeTestContext returns a rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *Context {
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	ctx := NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.heartbeatInterval = 10 * time.Millisecond
	ctx.heartbeatTimeout = 2 * defaultHeartbeatInterval
	return ctx
}
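
// Usage sketch (assumed): passing a nil clock makes the helper fall back to
// the wall clock, and the caller-owned stopper tears everything down when the
// test ends.
func TestNodeContextSketch(t *testing.T) {
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeCtx := newNodeTestContext(nil /* clock */, stopper)
	_ = nodeCtx // hand this to whatever RPC server or client the test builds
}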
Example #5
// TestHTTPSenderSend verifies sending posts.
func TestHTTPSenderSend(t *testing.T) {
	defer leaktest.AfterTest(t)
	server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Make sure SSL certs were properly specified.
		authenticationHook, err := security.AuthenticationHook(false /* !insecure */, r.TLS)
		if err != nil {
			t.Error(err)
		}

		if r.Method != "POST" {
			t.Errorf("expected method POST; got %s", r.Method)
		}
		if r.URL.Path != KVDBEndpoint+"Put" {
			t.Errorf("expected url %s; got %s", KVDBEndpoint+"Put", r.URL.Path)
		}
		// Unmarshal the request.
		reqBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("unexpected error reading body: %s", err)
		}

		args := &proto.PutRequest{}
		if err := util.UnmarshalRequest(r, reqBody, args, util.AllEncodings); err != nil {
			t.Errorf("unexpected error unmarshalling request: %s", err)
		}

		// Validate request against incoming user.
		if err := authenticationHook(args, false /*not public*/); err != nil {
			t.Error(err)
		}

		if !args.Key.Equal(testPutReq.Key) || !args.Timestamp.Equal(testPutReq.Timestamp) {
			t.Errorf("expected parsed %+v to equal %+v", args, testPutReq)
		}
		body, contentType, err := util.MarshalResponse(r, testPutResp, util.AllEncodings)
		if err != nil {
			t.Errorf("failed to marshal response: %s", err)
		}
		w.Header().Set(util.ContentTypeHeader, contentType)
		w.Write(body)
	}))
	defer server.Close()

	sender, err := newHTTPSender(addr, testutils.NewNodeTestBaseContext(), defaultRetryOptions)
	if err != nil {
		t.Fatal(err)
	}
	reply := &proto.PutResponse{}
	sender.Send(context.Background(), proto.Call{Args: testPutReq, Reply: reply})
	if reply.GoError() != nil {
		t.Errorf("expected success; got %s", reply.GoError())
	}
	if !reply.Timestamp.Equal(testPutResp.Timestamp) {
		t.Errorf("expected received %+v to equal %+v", reply, testPutResp)
	}
}
Example #6
func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext {
	rttc := &raftTransportTestContext{
		t:          t,
		stopper:    stop.NewStopper(),
		transports: map[roachpb.NodeID]*storage.RaftTransport{},
	}
	rttc.nodeRPCContext = rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, rttc.stopper)
	server := rpc.NewServer(rttc.nodeRPCContext) // never started
	rttc.gossip = gossip.New(rttc.nodeRPCContext, server, nil, rttc.stopper, metric.NewRegistry())
	rttc.gossip.SetNodeID(1)
	return rttc
}
Example #7
// TestHTTPSenderRetryHTTPSendError verifies that send is retried
// on all errors sending HTTP requests.
func TestHTTPSenderRetryHTTPSendError(t *testing.T) {
	defer leaktest.AfterTest(t)
	retryOptions := defaultRetryOptions
	retryOptions.InitialBackoff = 1 * time.Millisecond

	testCases := []func(*httptest.Server, http.ResponseWriter){
		// Send back an unparseable response but a success code on first try.
		func(s *httptest.Server, w http.ResponseWriter) {
			fmt.Fprintf(w, "\xff\xfe\x23\x44")
		},
		// Close the client connection.
		func(s *httptest.Server, w http.ResponseWriter) {
			s.CloseClientConnections()
		},
	}

	for i, testFunc := range testCases {
		count := 0
		var s *httptest.Server
		server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			count++
			if count == 1 {
				// On first retry, invoke the error function.
				testFunc(s, w)
				return
			}
			// Success on second try.
			body, contentType, err := util.MarshalResponse(r, testPutResp, util.AllEncodings)
			if err != nil {
				t.Errorf("%d: failed to marshal response: %s", i, err)
			}
			w.Header().Set(util.ContentTypeHeader, contentType)
			w.Write(body)
		}))

		s = server
		sender, err := newHTTPSender(addr, testutils.NewNodeTestBaseContext(), retryOptions)
		if err != nil {
			t.Fatal(err)
		}
		reply := &proto.PutResponse{}
		sender.Send(context.Background(), proto.Call{Args: testPutReq, Reply: reply})
		if reply.GoError() != nil {
			t.Errorf("%d: expected success; got %s", i, reply.GoError())
		}
		if count != 2 {
			t.Errorf("%d: expected retry", i)
		}
		server.Close()
	}
}
Example #8
func startTestHTTPServer(handler http.Handler) (*httptest.Server, string) {
	ctx := testutils.NewNodeTestBaseContext()

	httpServer := httptest.NewUnstartedServer(handler)
	tlsConfig, err := ctx.GetServerTLSConfig()
	if err != nil {
		log.Fatal(err)
	}
	httpServer.TLS = tlsConfig
	httpServer.StartTLS()

	addr := httpServer.Listener.Addr().String()
	return httpServer, addr
}
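
// Usage sketch, following the pattern of the TestHTTPSender examples above:
// pass in any http.Handler, get back a TLS test server and its address, and
// close the server when the test finishes.
func TestHTTPServerHelperSketch(t *testing.T) {
	server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()
	_ = addr // e.g. feed this to newHTTPSender as TestHTTPSenderSend does
}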
Example #9
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string,
	isolations []enginepb.IsolationType,
	txns []string,
	verify *verifier,
	t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s := &localtestcluster.LocalTestCluster{
		DBContext:         &dbCtx,
		RangeRetryOptions: &correctnessTestRetryOptions,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	verifier.run(isolations, s.DB, t)
}
Example #10
// benchmarkSingleRoundtripWithLatency runs a number of transactions writing to
// the same key back to back in a single round-trip. Latency is simulated
// by pausing before each RPC is sent.
func benchmarkSingleRoundtripWithLatency(b *testing.B, latency time.Duration) {
	s := &localtestcluster.LocalTestCluster{}
	s.Latency = latency
	s.Start(b, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	defer b.StopTimer()
	key := roachpb.Key("key")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if tErr := s.DB.Txn(func(txn *client.Txn) error {
			b := txn.NewBatch()
			b.Put(key, fmt.Sprintf("value-%d", i))
			return txn.CommitInBatch(b)
		}); tErr != nil {
			b.Fatal(tErr)
		}
	}
}
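
// Hedged sketch of how such a benchmark is typically driven: thin wrappers fix
// the simulated latency so `go test -bench` can compare configurations. The
// wrapper names and latency values here are illustrative assumptions.
func BenchmarkSingleRoundtripTxnNoLatencySketch(b *testing.B) {
	benchmarkSingleRoundtripWithLatency(b, 0)
}

func BenchmarkSingleRoundtripTxnWithLatencySketch(b *testing.B) {
	benchmarkSingleRoundtripWithLatency(b, 10*time.Millisecond)
}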
Example #11
func TestSendAndReceive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, stopper)
	g := gossip.New(nodeRPCContext, nil, stopper)
	g.SetNodeID(roachpb.NodeID(1))

	// Create several servers, each of which has two stores (A raft
	// node ID addresses a store). Node 1 has stores 1 and 2, node 2 has
	// stores 3 and 4, etc.
	//
	// We suppose that range 1 is replicated across the odd-numbered
	// stores in reverse order to ensure that the various IDs are not
	// equal: replica 1 is store 5, replica 2 is store 3, and replica 3
	// is store 1.
	const numNodes = 3
	const storesPerNode = 2
	nextNodeID := roachpb.NodeID(2)
	nextStoreID := roachpb.StoreID(2)

	// Per-node state.
	transports := map[roachpb.NodeID]*storage.RaftTransport{}

	// Per-store state.
	storeNodes := map[roachpb.StoreID]roachpb.NodeID{}
	channels := map[roachpb.StoreID]channelServer{}
	replicaIDs := map[roachpb.StoreID]roachpb.ReplicaID{
		1: 3,
		3: 2,
		5: 1,
	}

	messageTypes := []raftpb.MessageType{
		raftpb.MsgSnap,
		raftpb.MsgHeartbeat,
	}

	for nodeIndex := 0; nodeIndex < numNodes; nodeIndex++ {
		nodeID := nextNodeID
		nextNodeID++
		grpcServer := rpc.NewServer(nodeRPCContext)
		ln, err := util.ListenAndServeGRPC(stopper, grpcServer, util.TestAddr)
		if err != nil {
			t.Fatal(err)
		}

		addr := ln.Addr()
		// Have to call g.SetNodeID before calling g.AddInfo.
		g.ResetNodeID(roachpb.NodeID(nodeID))
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),
			&roachpb.NodeDescriptor{
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
			},
			time.Hour); err != nil {
			t.Fatal(err)
		}

		transport := storage.NewRaftTransport(storage.GossipAddressResolver(g), grpcServer, nodeRPCContext)
		transports[nodeID] = transport

		for storeIndex := 0; storeIndex < storesPerNode; storeIndex++ {
			storeID := nextStoreID
			nextStoreID++

			storeNodes[storeID] = nodeID

			channel := newChannelServer(numNodes*storesPerNode*len(messageTypes), 0)
			transport.Listen(storeID, channel.RaftMessage)
			channels[storeID] = channel
		}
	}

	messageTypeCounts := make(map[roachpb.StoreID]map[raftpb.MessageType]int)

	// Each store sends one snapshot and one heartbeat to each store, including
	// itself.
	for toStoreID, toNodeID := range storeNodes {
		if _, ok := messageTypeCounts[toStoreID]; !ok {
			messageTypeCounts[toStoreID] = make(map[raftpb.MessageType]int)
		}

		for fromStoreID, fromNodeID := range storeNodes {
			baseReq := storage.RaftMessageRequest{
				Message: raftpb.Message{
					From: uint64(fromStoreID),
					To:   uint64(toStoreID),
				},
				FromReplica: roachpb.ReplicaDescriptor{
					NodeID:  fromNodeID,
					StoreID: fromStoreID,
				},
				ToReplica: roachpb.ReplicaDescriptor{
					NodeID:  toNodeID,
					StoreID: toStoreID,
				},
			}

			for _, messageType := range messageTypes {
				req := baseReq
				req.Message.Type = messageType

				if err := transports[fromNodeID].Send(&req); err != nil {
					t.Errorf("unable to send %s from %d to %d: %s", req.Message.Type, fromNodeID, toNodeID, err)
				}
				messageTypeCounts[toStoreID][req.Message.Type]++
			}
		}
	}

	// Read all the messages from the channels. Note that the transport
	// does not guarantee in-order delivery between independent
	// transports, so we just verify that the right number of messages
	// end up in each channel.
	for toStoreID := range storeNodes {
		func() {
			for len(messageTypeCounts[toStoreID]) > 0 {
				req := <-channels[toStoreID].ch
				if req.Message.To != uint64(toStoreID) {
					t.Errorf("got unexpected message %v on channel %d", req, toStoreID)
				}

				if typeCounts, ok := messageTypeCounts[toStoreID]; ok {
					if _, ok := typeCounts[req.Message.Type]; ok {
						typeCounts[req.Message.Type]--
						if typeCounts[req.Message.Type] == 0 {
							delete(typeCounts, req.Message.Type)
						}
					} else {
						t.Errorf("expected %v to have key %v, but it did not", typeCounts, req.Message.Type)
					}
				} else {
					t.Errorf("expected %v to have key %v, but it did not", messageTypeCounts, toStoreID)
				}
			}

			delete(messageTypeCounts, toStoreID)
		}()

		select {
		case req := <-channels[toStoreID].ch:
			t.Errorf("got unexpected message %v on channel %d", req, toStoreID)
		case <-time.After(100 * time.Millisecond):
		}
	}

	if len(messageTypeCounts) > 0 {
		t.Errorf("remaining messages expected: %v", messageTypeCounts)
	}

	// Real raft messages have different node/store/replica IDs.
	// Send a message from replica 2 (on store 3, node 2) to replica 1 (on store 5, node 3)
	fromStoreID := roachpb.StoreID(3)
	toStoreID := roachpb.StoreID(5)
	expReq := &storage.RaftMessageRequest{
		GroupID: 1,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			From: uint64(replicaIDs[fromStoreID]),
			To:   uint64(replicaIDs[toStoreID]),
		},
		FromReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[fromStoreID],
			StoreID:   fromStoreID,
			ReplicaID: replicaIDs[fromStoreID],
		},
		ToReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[toStoreID],
			StoreID:   toStoreID,
			ReplicaID: replicaIDs[toStoreID],
		},
	}
	if err := transports[storeNodes[fromStoreID]].Send(expReq); err != nil {
		t.Errorf("unable to send message from %d to %d: %s", fromStoreID, toStoreID, err)
	}
	if req := <-channels[toStoreID].ch; !proto.Equal(req, expReq) {
		t.Errorf("got unexpected message %v on channel %d", req, toStoreID)
	}

	select {
	case req := <-channels[toStoreID].ch:
		t.Errorf("got unexpected message %v on channel %d", req, toStoreID)
	default:
	}
}
Example #12
// TestInOrderDelivery verifies that for a given pair of nodes, raft
// messages are delivered in order.
func TestInOrderDelivery(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), nil, stopper)
	g := gossip.New(nodeRPCContext, nil, stopper)

	grpcServer := rpc.NewServer(nodeRPCContext)
	ln, err := util.ListenAndServeGRPC(stopper, grpcServer, util.TestAddr)
	if err != nil {
		t.Fatal(err)
	}

	const numMessages = 100
	nodeID := roachpb.NodeID(2)
	serverTransport := storage.NewRaftTransport(storage.GossipAddressResolver(g), grpcServer, nodeRPCContext)
	serverChannel := newChannelServer(numMessages, 10*time.Millisecond)
	serverTransport.Listen(roachpb.StoreID(nodeID), serverChannel.RaftMessage)
	addr := ln.Addr()
	// Have to call g.SetNodeID before calling g.AddInfoProto.
	g.SetNodeID(nodeID)
	if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),
		&roachpb.NodeDescriptor{
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		},
		time.Hour); err != nil {
		t.Fatal(err)
	}

	clientNodeID := roachpb.NodeID(2)
	clientTransport := storage.NewRaftTransport(storage.GossipAddressResolver(g), nil, nodeRPCContext)

	for i := 0; i < numMessages; i++ {
		req := &storage.RaftMessageRequest{
			GroupID: 1,
			Message: raftpb.Message{
				To:     uint64(nodeID),
				From:   uint64(clientNodeID),
				Commit: uint64(i),
			},
			ToReplica: roachpb.ReplicaDescriptor{
				NodeID:    nodeID,
				StoreID:   roachpb.StoreID(nodeID),
				ReplicaID: roachpb.ReplicaID(nodeID),
			},
			FromReplica: roachpb.ReplicaDescriptor{
				NodeID:    clientNodeID,
				StoreID:   roachpb.StoreID(clientNodeID),
				ReplicaID: roachpb.ReplicaID(clientNodeID),
			},
		}
		if err := clientTransport.Send(req); err != nil {
			t.Errorf("failed to send message %d: %s", i, err)
		}
	}

	for i := 0; i < numMessages; i++ {
		req := <-serverChannel.ch
		if req.Message.Commit != uint64(i) {
			t.Errorf("messages out of order: got %d while expecting %d", req.Message.Commit, i)
		}
	}
}
Example #13
// createTestDB creates a local test server and starts it. The caller
// is responsible for stopping the test server.
func createTestDB(t testing.TB) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	s := &localtestcluster.LocalTestCluster{}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	return s, s.Sender.(*TxnCoordSender)
}
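
// Usage sketch (not from the original source, but consistent with the doc
// comment above and with the benchmark in Example #10): the caller owns the
// cluster's lifetime and typically drives it through s.DB.
func TestTxnCoordSenderSketch(t *testing.T) {
	s, _ := createTestDB(t)
	defer s.Stop()
	if err := s.DB.Txn(func(txn *client.Txn) error {
		return txn.Put("sketch-key", "sketch-value")
	}); err != nil {
		t.Fatal(err)
	}
}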
Example #14
// This is a fairly high-level test of CA and node certificates.
// We construct SSL server and clients and use the generated certs.
func TestUseCerts(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Do not mock cert access for this test.
	security.ResetReadFileFn()
	defer ResetTest()
	certsDir := util.CreateTempDir(t, "certs_test")
	defer util.CleanupDir(certsDir)

	err := security.RunCreateCACert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		512)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateNodeCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey),
		512, []string{"127.0.0.1"})
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateClientCert(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedCAKey),
		filepath.Join(certsDir, security.EmbeddedRootCert),
		filepath.Join(certsDir, security.EmbeddedRootKey),
		512, security.RootUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Load TLS Configs. This is what TestServer and HTTPClient do internally.
	_, err = security.LoadServerTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	_, err = security.LoadClientTLSConfig(
		filepath.Join(certsDir, security.EmbeddedCACert),
		filepath.Join(certsDir, security.EmbeddedNodeCert),
		filepath.Join(certsDir, security.EmbeddedNodeKey))
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Start a test server and override certs.
	// We use a real context since we want generated certs.
	params := base.TestServerArgs{
		SSLCA:      filepath.Join(certsDir, security.EmbeddedCACert),
		SSLCert:    filepath.Join(certsDir, security.EmbeddedNodeCert),
		SSLCertKey: filepath.Join(certsDir, security.EmbeddedNodeKey),
	}
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop()

	// Insecure mode.
	clientContext := testutils.NewNodeTestBaseContext()
	clientContext.Insecure = true
	httpClient, err := clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err := httpClient.Do(req)
	if err == nil {
		resp.Body.Close()
		t.Fatalf("Expected SSL error, got success")
	}

	// Secure mode but no Certs: permissive config.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.Insecure = false
	clientContext.SSLCert = ""
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	// Endpoint that does not enforce client auth (see: server/authentication_test.go)
	req, err = http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}

	// New client. With certs this time.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.SSLCA = filepath.Join(certsDir, security.EmbeddedCACert)
	clientContext.SSLCert = filepath.Join(certsDir, security.EmbeddedNodeCert)
	clientContext.SSLCertKey = filepath.Join(certsDir, security.EmbeddedNodeKey)
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	req, err = http.NewRequest("GET", s.AdminURL()+"/_admin/v1/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}
}
Example #15
// newNodeTestContext returns a rpc.Context for testing.
// It is meant to be used by nodes.
func newNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *rpc.Context {
	ctx := rpc.NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
	ctx.HeartbeatInterval = 10 * time.Millisecond
	ctx.HeartbeatTimeout = 5 * time.Second
	return ctx
}
Example #16
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestBootstrap, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.stores = storage.NewStores(ltc.Clock)
	tracer := tracing.NewTracer()
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (proto.Message, error) {
		if ltc.Latency > 0 {
			time.Sleep(ltc.Latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		sp.LogEvent(args.String())
		br, pErr := ltc.stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			sp.LogEvent("error: " + pErr.String())
		}
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = ltc.Stopper.ShouldDrain()
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,    // defined above
		RangeDescriptorDB:        ltc.stores, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, tracer,
		ltc.Stopper, NewTxnMetrics(metric.NewRegistry()))
	ltc.DB = client.NewDB(ltc.Sender)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ctx.Tracer = tracer
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example #17
// Start constructs and starts the local test server and creates a
// time series DB.
func (tm *testModel) Start() {
	tm.LocalTestCluster.Start(tm.t, testutils.NewNodeTestBaseContext(),
		kv.InitSenderForLocalTestCluster)
	tm.DB = NewDB(tm.LocalTestCluster.DB)
}
Example #18
// Verify client certificate enforcement and user whitelisting.
func TestSSLEnforcement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// HTTPS with client certs for security.RootUser.
	rootCertsContext := testutils.NewTestBaseContext(security.RootUser)
	// HTTPS with client certs for security.NodeUser.
	nodeCertsContext := testutils.NewNodeTestBaseContext()
	// HTTPS with client certs for TestUser.
	testCertsContext := testutils.NewTestBaseContext(TestUser)
	// HTTPS without client certs. The user does not matter.
	noCertsContext := testutils.NewTestBaseContext(TestUser)
	noCertsContext.SSLCert = ""
	// Plain http.
	insecureContext := testutils.NewTestBaseContext(TestUser)
	insecureContext.Insecure = true

	kvGet := &roachpb.GetRequest{}
	kvGet.Key = roachpb.Key("/")

	testCases := []struct {
		method, path string
		body         proto.Message
		ctx          *base.Context
		success      bool // request sent successfully (may be non-200)
		code         int  // http response code
	}{
		// /ui/: basic file server: no auth.
		{"GET", "/index.html", nil, rootCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, testCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, noCertsContext, true, http.StatusOK},
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		{"GET", "/index.html", nil, insecureContext, true, 308},

		// /_admin/: server.adminServer: no auth.
		{"GET", healthPath, nil, rootCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, nodeCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, testCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, noCertsContext, true, http.StatusOK},
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		{"GET", healthPath, nil, insecureContext, true, 308},

		// /debug/: server.adminServer: no auth.
		{"GET", debugEndpoint + "vars", nil, rootCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, testCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, noCertsContext, true, http.StatusOK},
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		{"GET", debugEndpoint + "vars", nil, insecureContext, true, 308},

		// /_status/nodes: server.statusServer: no auth.
		{"GET", statusNodesPrefix, nil, rootCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, nodeCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, testCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, noCertsContext, true, http.StatusOK},
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		{"GET", statusNodesPrefix, nil, insecureContext, true, 308},

		// /ts/: ts.Server: no auth.
		{"GET", ts.URLPrefix, nil, rootCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, nodeCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, testCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, noCertsContext, true, http.StatusNotFound},
		// TODO(tamird): s/308/http.StatusPermanentRedirect/ when it exists.
		{"GET", ts.URLPrefix, nil, insecureContext, true, 308},
	}

	for tcNum, tc := range testCases {
		client, err := tc.ctx.GetHTTPClient()
		if err != nil {
			t.Fatalf("[%d]: failed to get http client: %v", tcNum, err)
		}
		url := fmt.Sprintf(
			"%s://%s%s", tc.ctx.HTTPRequestScheme(),
			s.(*TestServer).Ctx.HTTPAddr, tc.path)
		resp, err := doHTTPReq(t, client, tc.method, url, tc.body)
		if (err == nil) != tc.success {
			t.Errorf("[%d]: expected success=%t, got err=%v", tcNum, tc.success, err)
		}
		if err != nil {
			continue
		}

		defer resp.Body.Close()
		if resp.StatusCode != tc.code {
			t.Errorf("[%d]: expected status code %d, got %d", tcNum, tc.code, resp.StatusCode)
		}
	}
}
Example #19
// Verify client certificate enforcement and user whitelisting.
func TestSSLEnforcement(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := StartTestServer(t)
	defer s.Stop()

	// HTTPS with client certs for "root".
	rootCertsContext := testutils.NewTestBaseContext(security.RootUser)
	// HTTPS with client certs for "node".
	nodeCertsContext := testutils.NewNodeTestBaseContext()
	// HTTPS with client certs for testuser.
	testCertsContext := testutils.NewTestBaseContext(TestUser)
	// HTTPS without client certs. The user does not matter.
	noCertsContext := testutils.NewTestBaseContext(TestUser)
	noCertsContext.Certs = ""
	// Plain http.
	insecureContext := testutils.NewTestBaseContext(TestUser)
	insecureContext.Insecure = true

	kvGet := &roachpb.GetRequest{}
	kvGet.Key = roachpb.Key("/")

	testCases := []struct {
		method, key string
		body        proto.Message
		ctx         *base.Context
		success     bool // request sent successfully (may be non-200)
		code        int  // http response code
	}{
		// /ui/: basic file server: no auth.
		{"GET", "/index.html", nil, rootCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, testCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, noCertsContext, true, http.StatusOK},
		{"GET", "/index.html", nil, insecureContext, false, -1},

		// /_admin/: server.adminServer: no auth.
		{"GET", healthPath, nil, rootCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, nodeCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, testCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, noCertsContext, true, http.StatusOK},
		{"GET", healthPath, nil, insecureContext, false, -1},

		// /debug/: server.adminServer: no auth.
		{"GET", debugEndpoint + "vars", nil, rootCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, nodeCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, testCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, noCertsContext, true, http.StatusOK},
		{"GET", debugEndpoint + "vars", nil, insecureContext, false, -1},

		// /_status/nodes: server.statusServer: no auth.
		{"GET", statusNodesPrefix, nil, rootCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, nodeCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, testCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, noCertsContext, true, http.StatusOK},
		{"GET", statusNodesPrefix, nil, insecureContext, false, -1},

		// /ts/: ts.Server: no auth.
		{"GET", ts.URLPrefix, nil, rootCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, nodeCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, testCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, noCertsContext, true, http.StatusNotFound},
		{"GET", ts.URLPrefix, nil, insecureContext, false, -1},

		// /sql/: sql.Server. These are proto reqs. The important field is header.User.
		{"POST", driver.Endpoint + driver.Execute.String(), sqlForUser(rootCertsContext),
			rootCertsContext, true, http.StatusOK},
		{"POST", driver.Endpoint + driver.Execute.String(), sqlForUser(nodeCertsContext),
			nodeCertsContext, true, http.StatusOK},
		{"POST", driver.Endpoint + driver.Execute.String(), sqlForUser(testCertsContext),
			testCertsContext, true, http.StatusOK},
		{"POST", driver.Endpoint + driver.Execute.String(), sqlForUser(noCertsContext),
			noCertsContext, true, http.StatusUnauthorized},
		{"POST", driver.Endpoint + driver.Execute.String(), sqlForUser(insecureContext),
			insecureContext, false, -1},
	}

	for tcNum, tc := range testCases {
		client, err := tc.ctx.GetHTTPClient()
		if err != nil {
			t.Fatalf("[%d]: failed to get http client: %v", tcNum, err)
		}
		resp, err := doHTTPReq(t, client, tc.method,
			fmt.Sprintf("%s://%s%s", tc.ctx.HTTPRequestScheme(), s.ServingAddr(), tc.key),
			tc.body)
		if (err == nil) != tc.success {
			t.Fatalf("[%d]: expected success=%t, got err=%v", tcNum, tc.success, err)
		}
		if err != nil {
			continue
		}

		defer resp.Body.Close()
		if resp.StatusCode != tc.code {
			t.Errorf("[%d]: expected status code %d, got %d", tcNum, tc.code, resp.StatusCode)
		}
	}
}
Example #20
// NewNodeTestContext returns a rpc.Context for testing.
// It is meant to be used by nodes.
func NewNodeTestContext(clock *hlc.Clock, stopper *stop.Stopper) *Context {
	if clock == nil {
		clock = hlc.NewClock(hlc.UnixNano)
	}
	return NewContext(testutils.NewNodeTestBaseContext(), clock, stopper)
}
Example #21
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {

	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestBootstrap, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.stores = storage.NewStores(ltc.Clock)
	var rpcSend rpcSendFn = func(_ SendOptions, _ string, _ []net.Addr,
		getArgs func(addr net.Addr) proto.Message, getReply func() proto.Message,
		_ *rpc.Context) (proto.Message, error) {
		// TODO(tschottdorf): remove getReply().
		if ltc.Latency > 0 {
			time.Sleep(ltc.Latency)
		}
		br, pErr := ltc.stores.Send(context.Background(), *getArgs(nil).(*roachpb.BatchRequest))
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))
		}
		br.Error = pErr
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = ltc.Stopper.ShouldDrain()
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock: ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,    // defined above
		RangeDescriptorDB:        ltc.stores, // for descriptor lookup
	}, ltc.Gossip)

	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, nil /* tracer */, ltc.Stopper)
	ltc.DB = client.NewDB(ltc.Sender)

	transport := storage.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example #22
func TestSendAndReceive(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(nodeRPCContext, gossip.TestBootstrap, stopper)
	g.SetNodeID(roachpb.NodeID(1))

	// Create several servers, each of which has two stores (A raft
	// node ID addresses a store). Node 1 has stores 1 and 2, node 2 has
	// stores 3 and 4, etc.
	//
	// We suppose that range 1 is replicated across the odd-numbered
	// stores in reverse order to ensure that the various IDs are not
	// equal: replica 1 is store 5, replica 2 is store 3, and replica 3
	// is store 1.
	const numServers = 3
	const storesPerServer = 2
	const numStores = numServers * storesPerServer
	nextNodeID := roachpb.NodeID(2)
	nextStoreID := roachpb.StoreID(2)

	// Per-node state.
	transports := map[roachpb.NodeID]*storage.RaftTransport{}

	// Per-store state.
	storeNodes := map[roachpb.StoreID]roachpb.NodeID{}
	channels := map[roachpb.StoreID]channelServer{}
	replicaIDs := map[roachpb.StoreID]roachpb.ReplicaID{
		1: 3,
		3: 2,
		5: 1,
	}

	for serverIndex := 0; serverIndex < numServers; serverIndex++ {
		nodeID := nextNodeID
		nextNodeID++
		rpcServer := rpc.NewServer(nodeRPCContext)
		grpcServer := grpc.NewServer()
		tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
		if err != nil {
			t.Fatal(err)
		}
		ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), util.CreateTestAddr("tcp"), tlsConfig)
		if err != nil {
			t.Fatal(err)
		}

		addr := ln.Addr()
		// Have to call g.SetNodeID before calling g.AddInfo.
		g.ResetNodeID(roachpb.NodeID(nodeID))
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID),
			&roachpb.NodeDescriptor{
				Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
			},
			time.Hour); err != nil {
			t.Fatal(err)
		}

		transport := storage.NewRaftTransport(storage.GossipAddressResolver(g), grpcServer, nodeRPCContext)
		transports[nodeID] = transport

		for store := 0; store < storesPerServer; store++ {
			storeID := nextStoreID
			nextStoreID++

			storeNodes[storeID] = nodeID

			channel := newChannelServer(10, 0)
			transport.Listen(storeID, channel.RaftMessage)
			channels[storeID] = channel
		}
	}

	// Heartbeat messages: Each store sends one message to each store.
	for fromStoreID, fromNodeID := range storeNodes {
		for toStoreID, toNodeID := range storeNodes {
			req := &storage.RaftMessageRequest{
				GroupID: 0,
				Message: raftpb.Message{
					Type: raftpb.MsgHeartbeat,
					From: uint64(fromStoreID),
					To:   uint64(toStoreID),
				},
				FromReplica: roachpb.ReplicaDescriptor{
					NodeID:    fromNodeID,
					StoreID:   fromStoreID,
					ReplicaID: 0,
				},
				ToReplica: roachpb.ReplicaDescriptor{
					NodeID:    toNodeID,
					StoreID:   toStoreID,
					ReplicaID: 0,
				},
			}

			if err := transports[fromNodeID].Send(req); err != nil {
				t.Errorf("Unable to send message from %d to %d: %s", fromNodeID, toNodeID, err)
			}
		}
	}

	// Read all the messages from the channels. Note that the transport
	// does not guarantee in-order delivery between independent
	// transports, so we just verify that the right number of messages
	// end up in each channel.
	for toStoreID := range storeNodes {
		for range storeNodes {
			select {
			case req := <-channels[toStoreID].ch:
				if req.Message.To != uint64(toStoreID) {
					t.Errorf("invalid message received on channel %d: %+v",
						toStoreID, req)
				}
			case <-time.After(5 * time.Second):
				t.Fatal("timed out waiting for message")
			}
		}

		select {
		case req := <-channels[toStoreID].ch:
			t.Errorf("got unexpected message %+v on channel %d", req, toStoreID)
		default:
		}
	}

	// Real raft messages have different node/store/replica IDs.
	// Send a message from replica 2 (on store 3, node 2) to replica 1 (on store 5, node 3)
	fromStoreID := roachpb.StoreID(3)
	toStoreID := roachpb.StoreID(5)
	req := &storage.RaftMessageRequest{
		GroupID: 1,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			From: uint64(replicaIDs[fromStoreID]),
			To:   uint64(replicaIDs[toStoreID]),
		},
		FromReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[fromStoreID],
			StoreID:   fromStoreID,
			ReplicaID: replicaIDs[fromStoreID],
		},
		ToReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[toStoreID],
			StoreID:   toStoreID,
			ReplicaID: replicaIDs[toStoreID],
		},
	}
	if err := transports[storeNodes[fromStoreID]].Send(req); err != nil {
		t.Errorf("Unable to send message from %d to %d: %s", fromStoreID, toStoreID, err)
	}
	select {
	case req2 := <-channels[toStoreID].ch:
		if !reflect.DeepEqual(req, req2) {
			t.Errorf("got unexpected message %+v", req2)
		}

	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for message")
	}

	select {
	case req := <-channels[toStoreID].ch:
		t.Errorf("got unexpected message %+v on channel %d", req, toStoreID)
	default:
	}
}
Example #23
// This is a fairly high-level test of CA and node certificates.
// We construct SSL server and clients and use the generated certs.
func TestUseCerts(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Do not mock cert access for this test.
	security.ResetReadFileFn()
	defer ResetTest()
	certsDir := util.CreateTempDir(t, "certs_test")
	defer util.CleanupDir(certsDir)

	err := security.RunCreateCACert(certsDir, 512)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateNodeCert(certsDir, 512, []string{"127.0.0.1"})
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	err = security.RunCreateClientCert(certsDir, 512, security.RootUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Load TLS Configs. This is what TestServer and HTTPClient do internally.
	_, err = security.LoadServerTLSConfig(certsDir, "node")
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	_, err = security.LoadClientTLSConfig(certsDir, security.NodeUser)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}

	// Start a test server and override certs.
	// We use a real context since we want generated certs.
	testCtx := server.NewContext()
	testCtx.Certs = certsDir
	testCtx.User = security.NodeUser
	testCtx.Addr = "127.0.0.1:0"
	testCtx.PGAddr = "127.0.0.1:0"
	s := &server.TestServer{Ctx: testCtx}
	if err := s.Start(); err != nil {
		t.Fatal(err)
	}
	defer s.Stop()

	// Insecure mode.
	clientContext := testutils.NewNodeTestBaseContext()
	clientContext.Insecure = true
	httpClient, err := clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	req, err := http.NewRequest("GET", "https://"+s.ServingAddr()+"/_admin/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err := httpClient.Do(req)
	if err == nil {
		resp.Body.Close()
		t.Fatalf("Expected SSL error, got success")
	}

	// Secure mode but no Certs directory: permissive config.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.Certs = ""
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatal(err)
	}
	// Endpoint that does not enforce client auth (see: server/authentication_test.go)
	req, err = http.NewRequest("GET", "https://"+s.ServingAddr()+"/_admin/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}

	// Endpoint that enforces client auth (see: server/authentication_test.go)
	req, err = http.NewRequest("GET", "https://"+s.ServingAddr()+driver.Endpoint, nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusUnauthorized {
		t.Fatalf("Expected status code %d, got: %d", http.StatusUnauthorized, resp.StatusCode)
	}

	// New client. With certs this time.
	clientContext = testutils.NewNodeTestBaseContext()
	clientContext.Certs = certsDir
	httpClient, err = clientContext.GetHTTPClient()
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	req, err = http.NewRequest("GET", "https://"+s.ServingAddr()+"/_admin/health", nil)
	if err != nil {
		t.Fatalf("could not create request: %v", err)
	}
	resp, err = httpClient.Do(req)
	if err != nil {
		t.Fatalf("Expected success, got %v", err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("Expected OK, got: %d", resp.StatusCode)
	}
}
Example #24
	"github.com/cockroachdb/cockroach/keys"
	"github.com/cockroachdb/cockroach/kv"
	"github.com/cockroachdb/cockroach/roachpb"
	"github.com/cockroachdb/cockroach/security"
	"github.com/cockroachdb/cockroach/sql"
	"github.com/cockroachdb/cockroach/sql/driver"
	"github.com/cockroachdb/cockroach/storage/engine"
	"github.com/cockroachdb/cockroach/testutils"
	"github.com/cockroachdb/cockroach/util"
	"github.com/cockroachdb/cockroach/util/leaktest"
	"github.com/cockroachdb/cockroach/util/stop"
	"github.com/gogo/protobuf/proto"
)

var testContext = NewTestContext()
var nodeTestBaseContext = testutils.NewNodeTestBaseContext()

// TestInitEngine tests whether the data directory string is parsed correctly.
func TestInitEngine(t *testing.T) {
	defer leaktest.AfterTest(t)
	tmp := util.CreateNTempDirs(t, "_server_test", 5)
	defer util.CleanupDirs(tmp)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	testCases := []struct {
		key       string             // data directory
		expAttrs  roachpb.Attributes // attributes for engine
		wantError bool               // do we expect an error from this key?
		isMem     bool               // is the engine in-memory?
	}{
		{"mem=1000", roachpb.Attributes{Attrs: []string{"mem"}}, false, true},
Example #25
// TestHTTPSenderRetryResponseCodes verifies that send is retried
// on some HTTP response codes but not on others.
func TestHTTPSenderRetryResponseCodes(t *testing.T) {
	defer leaktest.AfterTest(t)
	retryOptions := defaultRetryOptions
	retryOptions.InitialBackoff = 1 * time.Millisecond

	testCases := []struct {
		code  int
		retry bool
	}{
		{http.StatusServiceUnavailable, true},
		{http.StatusGatewayTimeout, true},
		{StatusTooManyRequests, true},
		{http.StatusRequestTimeout, false},
		{http.StatusBadRequest, false},
		{http.StatusNotFound, false},
		{http.StatusUnauthorized, false},
		{http.StatusForbidden, false},
		{http.StatusMethodNotAllowed, false},
		{http.StatusNotAcceptable, false},
		{http.StatusInternalServerError, false},
		{http.StatusNotImplemented, false},
	}
	for i, test := range testCases {
		count := 0
		server, addr := startTestHTTPServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			count++
			if count == 1 {
				http.Error(w, "manufactured error", test.code)
				return
			}
			if !test.retry {
				t.Errorf("%d: didn't expect retry on code %d", i, test.code)
			}
			body, contentType, err := util.MarshalResponse(r, testPutResp, util.AllEncodings)
			if err != nil {
				t.Errorf("%d: failed to marshal response: %s", i, err)
			}
			w.Header().Set(util.ContentTypeHeader, contentType)
			w.Write(body)
		}))

		sender, err := newHTTPSender(addr, testutils.NewNodeTestBaseContext(), retryOptions)
		if err != nil {
			t.Fatal(err)
		}
		reply := &proto.PutResponse{}
		sender.Send(context.Background(), proto.Call{Args: testPutReq, Reply: reply})
		if test.retry {
			if count != 2 {
				t.Errorf("%d: expected retry", i)
			}
			if reply.GoError() != nil {
				t.Errorf("%d: expected success after retry; got %s", i, reply.GoError())
			}
		} else {
			if count != 1 {
				t.Errorf("%d: expected no retry; got %d", i, count)
			}
			if reply.GoError() == nil {
				t.Errorf("%d: expected error", i)
			}
		}
		server.Close()
	}
}