// initHTTP registers http prefixes.
func (s *Server) initHTTP() {
	s.mux.Handle(rpc.DefaultRPCPath, s.rpc)
	s.mux.Handle("/", grpcutil.GRPCHandlerFunc(s.grpc, http.FileServer(
		&assetfs.AssetFS{
			Asset:     ui.Asset,
			AssetDir:  ui.AssetDir,
			AssetInfo: ui.AssetInfo,
		},
	)))

	// The admin server handles both /debug/ and /_admin/.
	// TODO(marc): when cookie-based authentication exists,
	// apply it for all web endpoints.
	s.mux.Handle(adminEndpoint, s.admin)
	s.mux.Handle(debugEndpoint, s.admin)
	s.mux.Handle(statusPrefix, s.status)
	s.mux.Handle(healthEndpoint, s.status)
	s.mux.Handle(ts.URLPrefix, s.tsServer)

	// The SQL endpoint handles its own authentication, verifying user
	// credentials against the requested user.
	s.mux.Handle(driver.Endpoint, s.sqlServer)

	close(s.httpReady)
}
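// The GRPCHandlerFunc used above multiplexes gRPC and plain HTTP traffic on
// a single listener. The following is a minimal sketch of how such a handler
// typically works, assuming the usual content-type based dispatch; it is an
// illustration of the pattern, not the grpcutil implementation itself.
// (Requires "net/http", "strings", and "google.golang.org/grpc".)
func exampleGRPCHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// gRPC requests arrive over HTTP/2 with an "application/grpc"
		// content type; everything else (UI assets, admin, status,
		// time-series, SQL) falls through to the plain HTTP handler.
		if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
		} else {
			otherHandler.ServeHTTP(w, r)
		}
	})
}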
// createTestNode creates an rpc server using the specified address,
// a gossip instance, a KV database, and a node using the specified slice
// of engines. The server, listener address, clock, node, and stopper are
// returned. If gossipBS is not nil, the gossip bootstrap address is set
// to gossipBS.
func createTestNode(addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T) (
	*rpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	ctx := storage.StoreContext{}

	stopper := stop.NewStopper()
	ctx.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, ctx.Clock, stopper)
	ctx.ScanInterval = 10 * time.Hour
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), addr, tlsConfig)
	if err != nil {
		t.Fatal(err)
	}
	g := gossip.New(nodeRPCContext, testContext.GossipBootstrapResolvers, stopper)
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		g.SetResolvers([]resolver.Resolver{r})
		g.Start(grpcServer, ln.Addr())
	}
	ctx.Gossip = g
	retryOpts := kv.GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := kv.NewDistSender(&kv.DistSenderContext{
		Clock:           ctx.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, g)
	tracer := tracing.NewTracer()
	sender := kv.NewTxnCoordSender(distSender, ctx.Clock, false, tracer, stopper)
	ctx.DB = client.NewDB(sender)
	// TODO(bdarnell): arrange to have the transport closed
	// (or attach LocalRPCTransport.Close to the stopper).
	ctx.Transport = storage.NewLocalRPCTransport(stopper)
	ctx.EventFeed = util.NewFeed(stopper)
	ctx.Tracer = tracer
	node := NewNode(ctx, metric.NewRegistry(), stopper, nil)
	return rpcServer, ln.Addr(), ctx.Clock, node, stopper
}
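// A hypothetical usage sketch (not part of the original file): it shows the
// shape of a createTestNode call and the cleanup contract, relying only on
// the signature visible above. The engines slice is assumed to be built by
// the caller.
func exampleCreateTestNodeUsage(t *testing.T, engines []engine.Engine) {
	// Passing nil for gossipBS skips gossip bootstrapping, which is
	// sufficient for a single-node test.
	rpcServer, addr, clock, node, stopper := createTestNode(util.CreateTestAddr("tcp"), engines, nil, t)
	defer stopper.Stop()
	_, _, _, _ = rpcServer, addr, clock, node
}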
func TestSendAndReceive(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(nodeRPCContext, gossip.TestBootstrap, stopper)
	g.SetNodeID(roachpb.NodeID(1))

	// Create several servers, each of which has two stores (a raft
	// node ID addresses a store). Node 1 has stores 1 and 2, node 2 has
	// stores 3 and 4, etc.
	//
	// We suppose that range 1 is replicated across the odd-numbered
	// stores in reverse order to ensure that the various IDs are not
	// equal: replica 1 is store 5, replica 2 is store 3, and replica 3
	// is store 1.
	const numServers = 3
	const storesPerServer = 2
	const numStores = numServers * storesPerServer
	nextNodeID := roachpb.NodeID(2)
	nextStoreID := roachpb.StoreID(2)

	// Per-node state.
	transports := map[roachpb.NodeID]storage.RaftTransport{}

	// Per-store state.
	storeNodes := map[roachpb.StoreID]roachpb.NodeID{}
	channels := map[roachpb.StoreID]channelServer{}
	replicaIDs := map[roachpb.StoreID]roachpb.ReplicaID{
		1: 3,
		3: 2,
		5: 1,
	}

	for serverIndex := 0; serverIndex < numServers; serverIndex++ {
		nodeID := nextNodeID
		nextNodeID++
		rpcServer := rpc.NewServer(nodeRPCContext)
		grpcServer := grpc.NewServer()
		tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
		if err != nil {
			t.Fatal(err)
		}
		ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), util.CreateTestAddr("tcp"), tlsConfig)
		if err != nil {
			t.Fatal(err)
		}
		addr := ln.Addr()

		// g.SetNodeID must be called before g.AddInfoProto.
		g.SetNodeID(nodeID)
		if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID), &roachpb.NodeDescriptor{
			Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
		}, time.Hour); err != nil {
			t.Fatal(err)
		}

		transport := newRPCTransport(g, grpcServer, nodeRPCContext)
		defer transport.Close()
		transports[nodeID] = transport

		for store := 0; store < storesPerServer; store++ {
			storeID := nextStoreID
			nextStoreID++
			storeNodes[storeID] = nodeID

			channel := newChannelServer(10, 0)
			if err := transport.Listen(storeID, channel.RaftMessage); err != nil {
				t.Fatal(err)
			}
			channels[storeID] = channel
		}
	}

	// Heartbeat messages: each store sends one message to each store.
	for fromStoreID, fromNodeID := range storeNodes {
		for toStoreID, toNodeID := range storeNodes {
			req := &storage.RaftMessageRequest{
				GroupID: 0,
				Message: raftpb.Message{
					Type: raftpb.MsgHeartbeat,
					From: uint64(fromStoreID),
					To:   uint64(toStoreID),
				},
				FromReplica: roachpb.ReplicaDescriptor{
					NodeID:    fromNodeID,
					StoreID:   fromStoreID,
					ReplicaID: 0,
				},
				ToReplica: roachpb.ReplicaDescriptor{
					NodeID:    toNodeID,
					StoreID:   toStoreID,
					ReplicaID: 0,
				},
			}
			if err := transports[fromNodeID].Send(req); err != nil {
				t.Errorf("unable to send message from %d to %d: %s", fromNodeID, toNodeID, err)
			}
		}
	}

	// Read all the messages from the channels. Note that the transport
	// does not guarantee in-order delivery between independent
	// transports, so we just verify that the right number of messages
	// end up in each channel.
	for toStoreID := range storeNodes {
		for range storeNodes {
			select {
			case req := <-channels[toStoreID].ch:
				if req.Message.To != uint64(toStoreID) {
					t.Errorf("invalid message received on channel %d: %+v", toStoreID, req)
				}
			case <-time.After(5 * time.Second):
				t.Fatal("timed out waiting for message")
			}
		}
		select {
		case req := <-channels[toStoreID].ch:
			t.Errorf("got unexpected message %+v on channel %d", req, toStoreID)
		default:
		}
	}

	// Real raft messages have different node/store/replica IDs.
	// Send a message from replica 2 (on store 3, node 2) to replica 1
	// (on store 5, node 3).
	fromStoreID := roachpb.StoreID(3)
	toStoreID := roachpb.StoreID(5)
	req := &storage.RaftMessageRequest{
		GroupID: 1,
		Message: raftpb.Message{
			Type: raftpb.MsgApp,
			From: uint64(replicaIDs[fromStoreID]),
			To:   uint64(replicaIDs[toStoreID]),
		},
		FromReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[fromStoreID],
			StoreID:   fromStoreID,
			ReplicaID: replicaIDs[fromStoreID],
		},
		ToReplica: roachpb.ReplicaDescriptor{
			NodeID:    storeNodes[toStoreID],
			StoreID:   toStoreID,
			ReplicaID: replicaIDs[toStoreID],
		},
	}
	if err := transports[storeNodes[fromStoreID]].Send(req); err != nil {
		t.Errorf("unable to send message from %d to %d: %s", fromStoreID, toStoreID, err)
	}
	select {
	case req2 := <-channels[toStoreID].ch:
		if !reflect.DeepEqual(req, req2) {
			t.Errorf("got unexpected message %+v", req2)
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for message")
	}
	select {
	case req := <-channels[toStoreID].ch:
		t.Errorf("got unexpected message %+v on channel %d", req, toStoreID)
	default:
	}
}
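// The tests in this file rely on a channelServer helper that funnels incoming
// raft messages into a buffered channel. A sketch consistent with its usage
// here (newChannelServer(bufSize, maxSleep), RaftMessage as the transport
// handler, ch as the receive side) might look as follows; the original
// definition may differ in detail. (Requires "math/rand".)
type channelServer struct {
	ch       chan *storage.RaftMessageRequest
	maxSleep time.Duration
}

func newChannelServer(bufSize int, maxSleep time.Duration) channelServer {
	return channelServer{
		ch:       make(chan *storage.RaftMessageRequest, bufSize),
		maxSleep: maxSleep,
	}
}

// RaftMessage delivers a request to the channel, optionally sleeping up to
// maxSleep to simulate scheduling delays on the receiving side.
func (s channelServer) RaftMessage(req *storage.RaftMessageRequest) error {
	if s.maxSleep != 0 {
		time.Sleep(time.Duration(rand.Int63n(int64(s.maxSleep))))
	}
	s.ch <- req
	return nil
}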
// TestInOrderDelivery verifies that for a given pair of nodes, raft
// messages are delivered in order.
func TestInOrderDelivery(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	defer stopper.Stop()
	nodeRPCContext := rpc.NewContext(nodeTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(nodeRPCContext, gossip.TestBootstrap, stopper)
	g.SetNodeID(roachpb.NodeID(1))
	rpcServer := rpc.NewServer(nodeRPCContext)
	grpcServer := grpc.NewServer()
	tlsConfig, err := nodeRPCContext.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	ln, err := util.ListenAndServe(stopper, grpcutil.GRPCHandlerFunc(grpcServer, rpcServer), util.CreateTestAddr("tcp"), tlsConfig)
	if err != nil {
		t.Fatal(err)
	}

	const numMessages = 100
	nodeID := roachpb.NodeID(2)
	serverTransport := newRPCTransport(g, grpcServer, nodeRPCContext)
	defer serverTransport.Close()
	serverChannel := newChannelServer(numMessages, 10*time.Millisecond)
	if err := serverTransport.Listen(roachpb.StoreID(nodeID), serverChannel.RaftMessage); err != nil {
		t.Fatal(err)
	}
	addr := ln.Addr()

	// g.SetNodeID must be called before g.AddInfoProto.
	g.SetNodeID(nodeID)
	if err := g.AddInfoProto(gossip.MakeNodeIDKey(nodeID), &roachpb.NodeDescriptor{
		Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
	}, time.Hour); err != nil {
		t.Fatal(err)
	}

	clientNodeID := roachpb.NodeID(2)
	clientTransport := newRPCTransport(g, nil, nodeRPCContext)
	defer clientTransport.Close()
	for i := 0; i < numMessages; i++ {
		req := &storage.RaftMessageRequest{
			GroupID: 1,
			Message: raftpb.Message{
				To:     uint64(nodeID),
				From:   uint64(clientNodeID),
				Commit: uint64(i),
			},
			ToReplica: roachpb.ReplicaDescriptor{
				NodeID:    nodeID,
				StoreID:   roachpb.StoreID(nodeID),
				ReplicaID: roachpb.ReplicaID(nodeID),
			},
			FromReplica: roachpb.ReplicaDescriptor{
				NodeID:    clientNodeID,
				StoreID:   roachpb.StoreID(clientNodeID),
				ReplicaID: roachpb.ReplicaID(clientNodeID),
			},
		}
		if err := clientTransport.Send(req); err != nil {
			t.Errorf("failed to send message %d: %s", i, err)
		}
	}
	for i := 0; i < numMessages; i++ {
		req := <-serverChannel.ch
		if req.Message.Commit != uint64(i) {
			t.Errorf("messages out of order: got %d while expecting %d", req.Message.Commit, i)
		}
	}
}