// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, addr net.Addr, engines []engine.Engine,
	attrs roachpb.Attributes, stopper *stop.Stopper) error {
	n.initDescriptor(addr, attrs)
	const method = "Node.Batch"
	if err := rpcServer.Register(method, n.executeCmd, &roachpb.BatchRequest{}); err != nil {
		log.Fatalf("unable to register node service with RPC server: %s", err)
	}

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, stopper); err != nil {
		return err
	}

	n.startedAt = n.ctx.Clock.Now().WallTime

	// Initialize publisher for Node Events. This requires the NodeID, which is
	// initialized by initStores(); because of this, some Store initialization
	// events will precede the StartNodeEvent on the feed.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)
	n.feed.StartNode(n.Descriptor, n.startedAt)

	n.startPublishStatuses(stopper)
	n.startGossip(stopper)
	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, engines []engine.Engine,
	attrs proto.Attributes, stopper *util.Stopper) error {
	n.initDescriptor(rpcServer.Addr(), attrs)
	if err := rpcServer.RegisterName("Node", (*nodeServer)(n)); err != nil {
		log.Fatalf("unable to register node service with RPC server: %s", err)
	}

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)
	stopper.AddCloser(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, stopper); err != nil {
		return err
	}

	// Pass NodeID to status monitor - this value is initialized in initStores,
	// but the StatusMonitor must be active before initStores.
	n.status.SetNodeID(n.Descriptor.NodeID)

	// Initialize publisher for Node Events.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)

	n.startedAt = n.ctx.Clock.Now().WallTime
	n.startStoresScanner(stopper)
	n.startPublishStatuses(stopper)
	n.startGossip(stopper)
	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
// TestNodeEventFeedTransactionRestart verifies that calls which indicate a
// transaction restart are counted as successful.
func TestNodeEventFeedTransactionRestart(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper, feed, consumers := startConsumerSet(1)
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	ner := &nodeEventReader{}
	sub := feed.Subscribe()
	stopper.RunWorker(func() {
		ner.readEvents(sub)
	})
	nodeID := proto.NodeID(1)

	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_BACKOFF,
			},
		},
	})
	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_IMMEDIATE,
			},
		},
	})
	nodefeed.CallComplete(&proto.PutRequest{}, &proto.PutResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_ABORT,
			},
		},
	})
	feed.Close()
	stopper.Stop()

	c := consumers[0]
	exp := []interface{}{
		&status.CallSuccessEvent{
			NodeID: nodeID,
			Method: proto.Get,
		},
		&status.CallSuccessEvent{
			NodeID: nodeID,
			Method: proto.Get,
		},
		&status.CallErrorEvent{
			NodeID: nodeID,
			Method: proto.Put,
		},
	}
	if !reflect.DeepEqual(exp, c.received) {
		t.Fatalf("received unexpected events: %s", ner.eventFeedString())
	}
}
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, engines []engine.Engine,
	attrs proto.Attributes, stopper *stop.Stopper) error {
	n.initDescriptor(rpcServer.Addr(), attrs)
	requests := []proto.Request{
		&proto.BatchRequest{},
		&proto.GetRequest{},
		&proto.PutRequest{},
		&proto.ConditionalPutRequest{},
		&proto.IncrementRequest{},
		&proto.DeleteRequest{},
		&proto.DeleteRangeRequest{},
		&proto.ScanRequest{},
		&proto.ReverseScanRequest{},
		&proto.EndTransactionRequest{},
		&proto.AdminSplitRequest{},
		&proto.AdminMergeRequest{},
		&proto.HeartbeatTxnRequest{},
		&proto.GCRequest{},
		&proto.PushTxnRequest{},
		&proto.RangeLookupRequest{},
		&proto.ResolveIntentRequest{},
		&proto.ResolveIntentRangeRequest{},
		&proto.MergeRequest{},
		&proto.TruncateLogRequest{},
		&proto.LeaderLeaseRequest{},
	}
	for _, r := range requests {
		if err := rpcServer.Register("Node."+r.Method().String(), n.executeCmd, r); err != nil {
			log.Fatalf("unable to register node service with RPC server: %s", err)
		}
	}

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, stopper); err != nil {
		return err
	}

	n.startedAt = n.ctx.Clock.Now().WallTime

	// Initialize publisher for Node Events. This requires the NodeID, which is
	// initialized by initStores(); because of this, some Store initialization
	// events will precede the StartNodeEvent on the feed.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)
	n.feed.StartNode(n.Descriptor, n.startedAt)

	n.startPublishStatuses(stopper)
	n.startGossip(stopper)
	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
func (n *Node) start(rpcServer *rpc.Server, addr net.Addr, engines []engine.Engine,
	attrs roachpb.Attributes) error {
	n.initDescriptor(addr, attrs)

	// Start status monitor.
	n.status.StartMonitorFeed(n.ctx.EventFeed)

	// Initialize stores, including bootstrapping new ones.
	if err := n.initStores(engines, n.stopper); err != nil {
		if err == errNeedsBootstrap {
			// This node has no initialized stores and no way to connect to
			// an existing cluster, so we bootstrap it.
			clusterID, err := bootstrapCluster(engines)
			if err != nil {
				return err
			}
			log.Infof("**** cluster %s has been created", clusterID)
			log.Infof("**** add additional nodes by specifying --join=%s", addr)
			// Make sure we add the node as a resolver.
			selfResolver, err := resolver.NewResolverFromAddress(addr)
			if err != nil {
				return err
			}
			n.ctx.Gossip.SetResolvers([]resolver.Resolver{selfResolver})
			// After bootstrapping, try again to initialize the stores.
			if err := n.initStores(engines, n.stopper); err != nil {
				return err
			}
		} else {
			return err
		}
	}

	n.startedAt = n.ctx.Clock.Now().WallTime

	// Initialize publisher for Node Events. This requires the NodeID, which is
	// initialized by initStores(); because of this, some Store initialization
	// events will precede the StartNodeEvent on the feed.
	n.feed = status.NewNodeEventFeed(n.Descriptor.NodeID, n.ctx.EventFeed)
	n.feed.StartNode(n.Descriptor, n.startedAt)

	n.startPublishStatuses(n.stopper)
	n.startGossip(n.stopper)

	// Register the RPC methods we support last as doing so allows RPCs to be
	// received which may access state initialized above without locks.
	const method = "Node.Batch"
	if err := rpcServer.Register(method, n.executeCmd, &roachpb.BatchRequest{}); err != nil {
		log.Fatalf("unable to register node service with RPC server: %s", err)
	}

	log.Infoc(n.context(), "Started node with %v engine(s) and attributes %v", engines, attrs.Attrs)
	return nil
}
// TestNodeEventFeedTransactionRestart verifies that calls which indicate a
// transaction restart are counted as successful.
func TestNodeEventFeedTransactionRestart(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	feed := util.NewFeed(stopper)
	nodeID := proto.NodeID(1)
	nodefeed := status.NewNodeEventFeed(nodeID, feed)
	ner := nodeEventReader{}
	ner.readEvents(feed)

	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_BACKOFF,
			},
		},
	})
	nodefeed.CallComplete(&proto.GetRequest{}, &proto.GetResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_IMMEDIATE,
			},
		},
	})
	nodefeed.CallComplete(&proto.PutRequest{}, &proto.PutResponse{
		ResponseHeader: proto.ResponseHeader{
			Error: &proto.Error{
				TransactionRestart: proto.TransactionRestart_ABORT,
			},
		},
	})
	feed.Flush()
	stopper.Stop()

	exp := []string{
		"Get",
		"Get",
		"failed Put",
	}
	if !reflect.DeepEqual(exp, ner.perNodeFeeds[nodeID]) {
		t.Fatalf("received unexpected events: %s", ner.eventFeedString())
	}
}
// TestNodeEventFeedTransactionRestart verifies that calls which indicate a
// transaction restart are counted as successful.
func TestNodeEventFeedTransactionRestart(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	feed := util.NewFeed(stopper)
	nodeID := roachpb.NodeID(1)
	nodefeed := status.NewNodeEventFeed(nodeID, feed)
	ner := nodeEventReader{}
	ner.readEvents(feed)

	d := 5 * time.Second
	get := wrap(&roachpb.GetRequest{})
	nodefeed.CallComplete(get, d, &roachpb.Error{
		TransactionRestart: roachpb.TransactionRestart_BACKOFF})
	nodefeed.CallComplete(get, d, &roachpb.Error{
		TransactionRestart: roachpb.TransactionRestart_IMMEDIATE})
	nodefeed.CallComplete(wrap(&roachpb.PutRequest{}), d, &roachpb.Error{
		TransactionRestart: roachpb.TransactionRestart_ABORT})
	nodefeed.CallComplete(wrap(&roachpb.PutRequest{}), d, &roachpb.Error{
		Detail: &roachpb.ErrorDetail{
			WriteIntent: &roachpb.WriteIntentError{
				Index: &roachpb.ErrPosition{Index: 0},
			},
		},
		TransactionRestart: roachpb.TransactionRestart_ABORT,
	})
	feed.Flush()
	stopper.Stop()

	exp := []string{
		"Get",
		"Get",
		"failed Batch",
		"failed Put",
	}
	if !reflect.DeepEqual(exp, ner.perNodeFeeds[nodeID]) {
		t.Fatalf("received unexpected events: %s", ner.eventFeedString())
	}
}
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	nodeDesc := roachpb.NodeDescriptor{
		NodeID: roachpb.NodeID(99),
	}

	// A testCase corresponds to a single Store event type. Each case contains a
	// method which publishes a single event to the given storeEventPublisher,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(roachpb.NewGet(roachpb.Key("abc"))), 0, nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: roachpb.NodeID(1),
				Method: roachpb.Get,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(roachpb.NewPut(roachpb.Key("abc"), roachpb.MakeValueFromString("def"))), 0, nil)
			},
			expected: &status.CallSuccessEvent{
				NodeID: roachpb.NodeID(1),
				Method: roachpb.Put,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(roachpb.NewGet(roachpb.Key("abc"))), 0, roachpb.NewErrorf("error"))
			},
			expected: &status.CallErrorEvent{
				NodeID: roachpb.NodeID(1),
				Method: roachpb.Batch,
			},
		},
		{
			publishTo: func(nef status.NodeEventFeed) {
				nef.CallComplete(wrap(roachpb.NewGet(roachpb.Key("abc"))), time.Minute, &roachpb.Error{
					Detail: &roachpb.ErrorDetail{
						WriteIntent: &roachpb.WriteIntentError{},
					},
					Index:   &roachpb.ErrPosition{Index: 0},
					Message: "boo",
				})
			},
			expected: &status.CallErrorEvent{
				NodeID:   roachpb.NodeID(1),
				Method:   roachpb.Get,
				Duration: time.Minute,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

	nodefeed := status.NewNodeEventFeed(roachpb.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}
	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	nodeDesc := proto.NodeDescriptor{
		NodeID: proto.NodeID(99),
	}

	// A testCase corresponds to a single Store event type. Each case contains a
	// method which publishes a single event to the given storeEventPublisher,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		name      string
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			name: "Start",
			publishTo: func(nef status.NodeEventFeed) {
				nef.StartNode(nodeDesc, 100)
			},
			expected: &status.StartNodeEvent{
				Desc:      nodeDesc,
				StartedAt: 100,
			},
		},
		{
			name: "Get",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			name: "Put",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.PutCall(proto.Key("abc"), proto.Value{Bytes: []byte("def")})
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			name: "Get Error",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				call.Reply.Header().SetGoError(util.Errorf("error"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	events := make([]interface{}, 0, len(expectedEvents))

	// Run test cases directly through a feed.
	stopper := stop.NewStopper()
	defer stopper.Stop()
	feed := util.NewFeed(stopper)
	feed.Subscribe(func(event interface{}) {
		events = append(events, event)
	})

	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}
	feed.Flush()

	if a, e := events, expectedEvents; !reflect.DeepEqual(a, e) {
		t.Errorf("received incorrect events.\nexpected: %v\nactual: %v", e, a)
	}
}
func TestNodeEventFeed(t *testing.T) {
	defer leaktest.AfterTest(t)

	// A testCase corresponds to a single Store event type. Each case contains a
	// method which publishes a single event to the given storeEventPublisher,
	// and an expected result interface which should match the produced
	// event.
	testCases := []struct {
		name      string
		publishTo func(status.NodeEventFeed)
		expected  interface{}
	}{
		{
			name: "Get",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
		{
			name: "Put",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.PutCall(proto.Key("abc"), proto.Value{Bytes: []byte("def")})
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallSuccessEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Put,
			},
		},
		{
			name: "Get Error",
			publishTo: func(nef status.NodeEventFeed) {
				call := proto.GetCall(proto.Key("abc"))
				call.Reply.Header().SetGoError(util.Errorf("error"))
				nef.CallComplete(call.Args, call.Reply)
			},
			expected: &status.CallErrorEvent{
				NodeID: proto.NodeID(1),
				Method: proto.Get,
			},
		},
	}

	// Compile expected events into a single slice.
	expectedEvents := make([]interface{}, len(testCases))
	for i := range testCases {
		expectedEvents[i] = testCases[i].expected
	}

	// verifyEventSlice verifies that the given set of events is equal to the
	// expectedEvents.
	verifyEventSlice := func(source string, events []interface{}) {
		if a, e := len(events), len(expectedEvents); a != e {
			t.Errorf("%s had wrong number of events %d, expected %d", source, a, e)
			return
		}
		for i := range events {
			if a, e := events[i], expectedEvents[i]; !reflect.DeepEqual(a, e) {
				t.Errorf("%s had wrong event for case %s: got %v, expected %v", source, testCases[i].name, a, e)
			}
		}
	}

	// Run test cases directly through a feed.
	stopper, feed, consumers := startConsumerSet(3)
	nodefeed := status.NewNodeEventFeed(proto.NodeID(1), feed)
	for _, tc := range testCases {
		tc.publishTo(nodefeed)
	}
	feed.Close()
	waitForStopper(t, stopper)

	for i, c := range consumers {
		verifyEventSlice(fmt.Sprintf("feed direct consumer %d", i), c.received)
	}
}