// NewNode creates a new raft node to use for tests
func NewNode(t *testing.T, clockSource *fakeclock.FakeClock, tc *cautils.TestCA, opts ...raft.NodeOptions) *TestNode {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err, "can't bind to raft service port")
	wrappedListener := NewWrappedListener(l)

	securityConfig, err := tc.NewNodeConfig(ca.ManagerRole)
	require.NoError(t, err)

	serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
	s := grpc.NewServer(serverOpts...)

	cfg := raft.DefaultNodeConfig()

	stateDir, err := ioutil.TempDir("", "test-raft")
	require.NoError(t, err, "can't create temporary state directory")

	keyRotator := NewSimpleKeyRotator(raft.EncryptionKeys{CurrentDEK: []byte("current")})
	newNodeOpts := raft.NodeOptions{
		ID:             securityConfig.ClientTLSCreds.NodeID(),
		Addr:           l.Addr().String(),
		Config:         cfg,
		StateDir:       stateDir,
		ClockSource:    clockSource,
		TLSCredentials: securityConfig.ClientTLSCreds,
		KeyRotator:     keyRotator,
	}

	if len(opts) > 1 {
		panic("more than one optional argument provided")
	}
	if len(opts) == 1 {
		newNodeOpts.JoinAddr = opts[0].JoinAddr
		if opts[0].Addr != "" {
			newNodeOpts.Addr = opts[0].Addr
		}
	}

	n := raft.NewNode(newNodeOpts)

	healthServer := health.NewHealthServer()
	api.RegisterHealthServer(s, healthServer)
	raft.Register(s, n)

	go func() {
		// After stopping, we should receive an error from Serve
		assert.Error(t, s.Serve(wrappedListener))
	}()

	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)

	return &TestNode{
		Node:           n,
		Listener:       wrappedListener,
		SecurityConfig: securityConfig,
		Address:        newNodeOpts.Addr,
		StateDir:       newNodeOpts.StateDir,
		Server:         s,
		KeyRotator:     keyRotator,
	}
}
// CopyNode returns a copy of a node
func CopyNode(t *testing.T, clockSource *fakeclock.FakeClock, oldNode *TestNode, forceNewCluster bool, kr *SimpleKeyRotator) (*TestNode, context.Context) {
	wrappedListener := RecycleWrappedListener(oldNode.Listener)
	securityConfig := oldNode.SecurityConfig
	serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
	s := grpc.NewServer(serverOpts...)

	cfg := raft.DefaultNodeConfig()
	if kr == nil {
		kr = oldNode.KeyRotator
	}

	newNodeOpts := raft.NodeOptions{
		ID:              securityConfig.ClientTLSCreds.NodeID(),
		Addr:            oldNode.Address,
		Config:          cfg,
		StateDir:        oldNode.StateDir,
		ForceNewCluster: forceNewCluster,
		ClockSource:     clockSource,
		SendTimeout:     2 * time.Second,
		TLSCredentials:  securityConfig.ClientTLSCreds,
		KeyRotator:      kr,
	}

	ctx, cancel := context.WithCancel(context.Background())
	n := raft.NewNode(newNodeOpts)

	healthServer := health.NewHealthServer()
	api.RegisterHealthServer(s, healthServer)
	raft.Register(s, n)

	go func() {
		// After stopping, we should receive an error from Serve
		require.Error(t, s.Serve(wrappedListener))
	}()

	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)

	return &TestNode{
		Node:           n,
		Listener:       wrappedListener,
		SecurityConfig: securityConfig,
		Address:        newNodeOpts.Addr,
		StateDir:       newNodeOpts.StateDir,
		cancel:         cancel,
		Server:         s,
		KeyRotator:     kr,
	}, ctx
}
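// restartNodeWithCopy is an illustrative sketch, not part of the utilities
// above: the helper name is assumed, and it only shows how CopyNode is
// typically combined with the JoinAndStart/Run sequence used elsewhere in
// these tests to bring a previously stopped test node back up.
func restartNodeWithCopy(t *testing.T, clockSource *fakeclock.FakeClock, oldNode *TestNode, forceNewCluster bool) *TestNode {
	// Reuse the old node's listener, state directory, and key rotator.
	n, ctx := CopyNode(t, clockSource, oldNode, forceNewCluster, nil)
	require.NoError(t, n.JoinAndStart(ctx), "can't join cluster")
	go n.Run(ctx)
	return n
}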
func TestRaftUnreachableNode(t *testing.T) {
	t.Parallel()

	nodes := make(map[uint64]*raftutils.TestNode)
	var clockSource *fakeclock.FakeClock
	nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Add a new node
	nodes[2] = raftutils.NewNode(t, clockSource, tc, raft.NodeOptions{JoinAddr: nodes[1].Address})

	err := nodes[2].JoinAndStart(ctx)
	require.NoError(t, err, "can't join cluster")

	go nodes[2].Run(ctx)

	// Stop the Raft server of second node on purpose after joining
	nodes[2].Server.Stop()
	nodes[2].Listener.Close()

	raftutils.AdvanceTicks(clockSource, 5)
	time.Sleep(100 * time.Millisecond)

	wrappedListener := raftutils.RecycleWrappedListener(nodes[2].Listener)
	securityConfig := nodes[2].SecurityConfig
	serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
	s := grpc.NewServer(serverOpts...)

	nodes[2].Server = s
	raft.Register(s, nodes[2].Node)

	go func() {
		// After stopping, we should receive an error from Serve
		assert.Error(t, s.Serve(wrappedListener))
	}()

	raftutils.WaitForCluster(t, clockSource, nodes)
	defer raftutils.TeardownCluster(t, nodes)

	// Propose a value
	value, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime)
	assert.NoError(t, err, "failed to propose value")

	// All nodes should have the value in the physical store
	raftutils.CheckValue(t, clockSource, nodes[1], value)
	raftutils.CheckValue(t, clockSource, nodes[2], value)
}
// RestartNode restarts a raft test node
func RestartNode(t *testing.T, clockSource *fakeclock.FakeClock, oldNode *TestNode, forceNewCluster bool) *TestNode {
	wrappedListener := RecycleWrappedListener(oldNode.Listener)
	securityConfig := oldNode.SecurityConfig
	serverOpts := []grpc.ServerOption{grpc.Creds(securityConfig.ServerTLSCreds)}
	s := grpc.NewServer(serverOpts...)

	cfg := raft.DefaultNodeConfig()

	newNodeOpts := raft.NewNodeOptions{
		ID:              securityConfig.ClientTLSCreds.NodeID(),
		Addr:            oldNode.Address,
		Config:          cfg,
		StateDir:        oldNode.StateDir,
		ForceNewCluster: forceNewCluster,
		ClockSource:     clockSource,
		SendTimeout:     10 * time.Second,
		TLSCredentials:  securityConfig.ClientTLSCreds,
	}

	ctx := context.Background()
	n := raft.NewNode(ctx, newNodeOpts)
	n.Server = s

	healthServer := health.NewHealthServer()
	api.RegisterHealthServer(s, healthServer)
	raft.Register(s, n)

	go func() {
		// After stopping, we should receive an error from Serve
		assert.Error(t, s.Serve(wrappedListener))
	}()

	healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)

	err := n.JoinAndStart()
	require.NoError(t, err, "can't join cluster")

	go n.Run(ctx)

	return &TestNode{Node: n, Listener: wrappedListener, SecurityConfig: securityConfig}
}
func TestRaftUnreachableNode(t *testing.T) {
	t.Parallel()

	nodes := make(map[uint64]*raftutils.TestNode)
	var clockSource *fakeclock.FakeClock
	nodes[1], clockSource = raftutils.NewInitNode(t, tc, nil)

	ctx := context.Background()

	// Add a new node, but don't start its server yet
	n := raftutils.NewNode(t, clockSource, tc, raft.NewNodeOptions{JoinAddr: nodes[1].Address})
	go n.Run(ctx)

	raftutils.AdvanceTicks(clockSource, 5)
	time.Sleep(100 * time.Millisecond)

	raft.Register(n.Server, n.Node)

	// Now start the new node's server
	go func() {
		// After stopping, we should receive an error from Serve
		assert.Error(t, n.Server.Serve(n.Listener))
	}()

	nodes[2] = n
	raftutils.WaitForCluster(t, clockSource, nodes)
	defer raftutils.TeardownCluster(t, nodes)

	// Propose a value
	value, err := raftutils.ProposeValue(t, nodes[1])
	assert.NoError(t, err, "failed to propose value")

	// All nodes should have the value in the physical store
	raftutils.CheckValue(t, clockSource, nodes[1], value)
	raftutils.CheckValue(t, clockSource, nodes[2], value)
}