// TestRPCClientForceLeave verifies that ForceLeave marks a downed peer as
// left: p1 joins p2 over the LAN, p2 is shut down, and after ForceLeave
// p1's member list still contains p2 with StatusLeft.
func TestRPCClientForceLeave(t *testing.T) {
	p1 := testRPCClient(t)
	p2 := testRPCClient(t)
	defer p1.Close()
	defer p2.Close()
	testutil.Yield()

	s2Addr := fmt.Sprintf("127.0.0.1:%d", p2.agent.config.Ports.SerfLan)
	if _, err := p1.agent.JoinLAN([]string{s2Addr}); err != nil {
		t.Fatalf("err: %s", err)
	}
	testutil.Yield()

	// Shut down p2 so it appears dead to p1.
	if err := p2.agent.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Give the failure detector time to notice the dead node.
	time.Sleep(time.Second)

	if err := p1.client.ForceLeave(p2.agent.config.NodeName); err != nil {
		t.Fatalf("err: %s", err)
	}
	testutil.Yield()

	// Both members remain listed; the forced-out node should be "left".
	// NOTE(review): indexing m[1] assumes member ordering — confirm it is stable.
	m := p1.agent.LANMembers()
	if len(m) != 2 {
		t.Fatalf("should have 2 members: %#v", m)
	}
	if m[1].Status != serf.StatusLeft {
		t.Fatalf("should be left: %#v %v", m[1], m[1].Status == serf.StatusLeft)
	}
}
func TestRPCClientAuth(t *testing.T) { cl, a1, ipc := testRPCClient(t) defer ipc.Shutdown() defer cl.Close() defer a1.Shutdown() // Setup an auth key ipc.authKey = "foobar" if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() if err := cl.UserEvent("deploy", nil, false); err.Error() != authRequired { t.Fatalf("err: %s", err) } testutil.Yield() config := client.Config{Addr: ipc.listener.Addr().String(), AuthKey: "foobar"} rpcClient, err := client.ClientFromConfig(&config) if err != nil { t.Fatalf("err: %s", err) } defer rpcClient.Close() if err := rpcClient.UserEvent("deploy", nil, false); err != nil { t.Fatalf("err: %s", err) } }
func TestSerf_eventsJoin(t *testing.T) { // Create the s1 config with an event channel so we can listen eventCh := make(chan Event, 4) s1Config := testConfig() s1Config.EventCh = eventCh s2Config := testConfig() s1, err := Create(s1Config) if err != nil { t.Fatalf("err: %s", err) } s2, err := Create(s2Config) if err != nil { t.Fatalf("err: %s", err) } defer s1.Shutdown() defer s2.Shutdown() testutil.Yield() _, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false) if err != nil { t.Fatalf("err: %s", err) } testutil.Yield() testEvents(t, eventCh, s2Config.NodeName, []EventType{EventMemberJoin}) }
// TestRPCClientWANMembers verifies the WANMembers RPC: a lone agent reports
// one WAN member, and after a WAN join of p2 it reports two.
func TestRPCClientWANMembers(t *testing.T) {
	p1 := testRPCClient(t)
	p2 := testRPCClient(t)
	defer p1.Close()
	defer p2.Close()
	testutil.Yield()

	// Before joining, only the local agent is a WAN member.
	mem, err := p1.client.WANMembers()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(mem) != 1 {
		t.Fatalf("bad: %#v", mem)
	}

	// Join over the WAN port (second Join arg true selects WAN).
	s2Addr := fmt.Sprintf("127.0.0.1:%d", p2.agent.config.Ports.SerfWan)
	_, err = p1.client.Join([]string{s2Addr}, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	testutil.Yield()

	mem, err = p1.client.WANMembers()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(mem) != 2 {
		t.Fatalf("bad: %#v", mem)
	}
}
// TestSerf_reconnect_sameIP simulates a node failing and being replaced at
// the same name/address: s1 should observe join, failed, then join again
// once the impostor with s2's identity comes back.
func TestSerf_reconnect_sameIP(t *testing.T) {
	eventCh := make(chan Event, 64)
	s1Config := testConfig()
	s1Config.EventCh = eventCh

	// s2 shares s1's address, on the next port, so the "reconnect" targets
	// the exact same IP.
	s2Config := testConfig()
	s2Config.MemberlistConfig.BindAddr = s1Config.MemberlistConfig.BindAddr
	s2Config.MemberlistConfig.BindPort = s1Config.MemberlistConfig.BindPort + 1

	s2Addr := fmt.Sprintf("%s:%d",
		s2Config.MemberlistConfig.BindAddr,
		s2Config.MemberlistConfig.BindPort)
	s2Name := s2Config.NodeName

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()

	testutil.Yield()

	_, err = s1.Join([]string{s2Addr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Now force the shutdown of s2 so it appears to fail.
	if err := s2.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Wait several probe intervals so s1 marks s2 failed.
	time.Sleep(s2Config.MemberlistConfig.ProbeInterval * 5)

	// Bring back s2 by mimicking its name and address
	s2Config = testConfig()
	s2Config.MemberlistConfig.BindAddr = s1Config.MemberlistConfig.BindAddr
	s2Config.MemberlistConfig.BindPort = s1Config.MemberlistConfig.BindPort + 1
	s2Config.NodeName = s2Name
	s2, err = Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Allow the reconnect logic to find and re-join the revived node.
	time.Sleep(s1Config.ReconnectInterval * 5)

	testEvents(t, eventCh, s2Name,
		[]EventType{EventMemberJoin, EventMemberFailed, EventMemberJoin})
}
// TestSerf_joinLeave walks the basic lifecycle: two isolated nodes each see
// one member, both see two after a join, and after s1 leaves (and the
// reaper runs) both are back to one.
func TestSerf_joinLeave(t *testing.T) {
	s1Config := testConfig()
	s2Config := testConfig()

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()

	testutil.Yield()

	if len(s1.Members()) != 1 {
		t.Fatalf("s1 members: %d", len(s1.Members()))
	}
	if len(s2.Members()) != 1 {
		t.Fatalf("s2 members: %d", len(s2.Members()))
	}

	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	if len(s1.Members()) != 2 {
		t.Fatalf("s1 members: %d", len(s1.Members()))
	}
	if len(s2.Members()) != 2 {
		t.Fatalf("s2 members: %d", len(s2.Members()))
	}

	err = s1.Leave()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Give the reaper time to reap nodes
	time.Sleep(s1Config.ReapInterval * 2)

	if len(s1.Members()) != 1 {
		t.Fatalf("s1 members: %d", len(s1.Members()))
	}
	if len(s2.Members()) != 1 {
		t.Fatalf("s2 members: %d", len(s2.Members()))
	}
}
func TestSerf_Leave_SnapshotRecovery(t *testing.T) { td, err := ioutil.TempDir("", "serf") if err != nil { t.Fatalf("err: %v", err) } defer os.RemoveAll(td) s1Config := testConfig() s2Config := testConfig() s2Config.SnapshotPath = td + "snap" s1, err := Create(s1Config) if err != nil { t.Fatalf("err: %s", err) } defer s1.Shutdown() s2, err := Create(s2Config) if err != nil { t.Fatalf("err: %s", err) } defer s2.Shutdown() _, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false) if err != nil { t.Fatalf("err: %s", err) } testutil.Yield() if err := s2.Leave(); err != nil { t.Fatalf("err: %s", err) } if err := s2.Shutdown(); err != nil { t.Fatalf("err: %s", err) } time.Sleep(s2Config.MemberlistConfig.ProbeInterval * 5) // Verify that s2 is "left" testMember(t, s1.Members(), s2Config.NodeName, StatusLeft) // Restart s2 from the snapshot now! s2Config.EventCh = nil s2, err = Create(s2Config) if err != nil { t.Fatalf("err: %s", err) } defer s2.Shutdown() // Wait for the node to auto rejoin testutil.Yield() // Verify that s2 is didn't join testMember(t, s1.Members(), s2Config.NodeName, StatusLeft) if len(s2.Members()) != 1 { t.Fatalf("bad members: %#v", s2.Members()) } }
func TestRPCClientForceLeave(t *testing.T) { client, a1, ipc := testRPCClient(t) a2 := testAgent(nil) defer ipc.Shutdown() defer client.Close() defer a1.Shutdown() defer a2.Shutdown() if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } if err := a2.Start(); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() s2Addr := a2.conf.MemberlistConfig.BindAddr if _, err := a1.Join([]string{s2Addr}, false); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() if err := a2.Shutdown(); err != nil { t.Fatalf("err: %s", err) } start := time.Now() WAIT: time.Sleep(a1.conf.MemberlistConfig.ProbeInterval * 3) m := a1.Serf().Members() if len(m) != 2 { t.Fatalf("should have 2 members: %#v", a1.Serf().Members()) } if findMember(t, m, a2.conf.NodeName).Status != serf.StatusFailed && time.Now().Sub(start) < 3*time.Second { goto WAIT } if err := client.ForceLeave(a2.conf.NodeName); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() m = a1.Serf().Members() if len(m) != 2 { t.Fatalf("should have 2 members: %#v", a1.Serf().Members()) } if findMember(t, m, a2.conf.NodeName).Status != serf.StatusLeft { t.Fatalf("should be left: %#v", m[1]) } }
// TestSerf_NameResolution creates a deliberate node-name conflict (s3 reuses
// s1's name) and verifies the conflict vote keeps s1 alive and shuts s3 down.
func TestSerf_NameResolution(t *testing.T) {
	// Create the s1 config with an event channel so we can listen
	s1Config := testConfig()
	s2Config := testConfig()
	s3Config := testConfig()

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()

	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s2.Shutdown()

	// Create an artificial node name conflict!
	s3Config.NodeName = s1Config.NodeName
	s3, err := Create(s3Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s3.Shutdown()

	testutil.Yield()

	// Join s1 to s2 first. s2 should vote for s1 in conflict
	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	_, err = s1.Join([]string{s3Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Wait for the query period to end
	time.Sleep(s1.DefaultQueryTimeout() * 2)

	// s3 should have shutdown, while s1 is running
	if s1.State() != SerfAlive {
		t.Fatalf("bad: %v", s1.State())
	}
	if s2.State() != SerfAlive {
		t.Fatalf("bad: %v", s2.State())
	}
	if s3.State() != SerfShutdown {
		t.Fatalf("bad: %v", s3.State())
	}
}
func TestRPCClientStream_Query(t *testing.T) { cl, a1, ipc := testRPCClient(t) defer ipc.Shutdown() defer cl.Close() defer a1.Shutdown() if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } eventCh := make(chan map[string]interface{}, 64) if handle, err := cl.Stream("query", eventCh); err != nil { t.Fatalf("err: %s", err) } else { defer cl.Stop(handle) } testutil.Yield() params := client.QueryParam{ Timeout: 200 * time.Millisecond, Name: "deploy", Payload: []byte("foo"), } if err := cl.Query(¶ms); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() select { case e := <-eventCh: if e["Event"].(string) != "query" { t.Fatalf("bad query: %#v", e) } if e["ID"].(int64) != 1 { t.Fatalf("bad query: %#v", e) } if e["LTime"].(int64) != 1 { t.Fatalf("bad query: %#v", e) } if e["Name"].(string) != "deploy" { t.Fatalf("bad query: %#v", e) } if bytes.Compare(e["Payload"].([]byte), []byte("foo")) != 0 { t.Fatalf("bad query: %#v", e) } default: t.Fatalf("should have query") } }
// TestRPCClientMonitor attaches a debug-level log monitor and checks that
// expected log lines (client accept, join attempt) arrive on the channel.
func TestRPCClientMonitor(t *testing.T) {
	p1 := testRPCClient(t)
	defer p1.Close()
	testutil.Yield()

	eventCh := make(chan string, 64)
	if handle, err := p1.client.Monitor("debug", eventCh); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		defer p1.client.Stop(handle)
	}

	testutil.Yield()

	// Drain the backlog looking for the accept line; stop when empty.
	found := false
OUTER1:
	for {
		select {
		case e := <-eventCh:
			if strings.Contains(e, "Accepted client") {
				found = true
			}
		default:
			break OUTER1
		}
	}
	if !found {
		t.Fatalf("should log client accept")
	}

	// Join a bad thing to generate more events
	p1.agent.JoinLAN(nil)
	testutil.Yield()

	// Drain again, now looking for the join log line.
	found = false
OUTER2:
	for {
		select {
		case e := <-eventCh:
			if strings.Contains(e, "joining") {
				found = true
			}
		default:
			break OUTER2
		}
	}
	if !found {
		t.Fatalf("should log joining")
	}
}
func TestRPCClientMonitor(t *testing.T) { client, a1 := testRPCClient(t) defer client.Close() defer a1.Shutdown() if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } eventCh := make(chan string, 64) doneCh := make(chan struct{}, 64) defer close(doneCh) if err := client.Monitor("debug", eventCh, doneCh); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() select { case e := <-eventCh: if !strings.Contains(e, "starting") { t.Fatalf("bad: %s", e) } default: t.Fatalf("should have backlog") } // Drain the rest of the messages as we know it drainEventCh(eventCh) // Join a bad thing to generate more events a1.Join(nil) testutil.Yield() select { case e := <-eventCh: if !strings.Contains(e, "joining") { t.Fatalf("bad: %s", e) } default: t.Fatalf("should have message") } // End the monitor and wait for the eventCh to close doneCh <- struct{}{} for _ = range eventCh { } }
// TestAgentTagsFile verifies tag persistence: tags set on one agent are
// written to the TagsFile and restored by a second agent using the same
// config.
func TestAgentTagsFile(t *testing.T) {
	tags := map[string]string{
		"role":       "webserver",
		"datacenter": "us-east",
	}

	td, err := ioutil.TempDir("", "serf")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(td)

	agentConfig := DefaultConfig()
	agentConfig.TagsFile = filepath.Join(td, "tags.json")

	a1 := testAgentWithConfig(agentConfig, serf.DefaultConfig(), nil)
	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer a1.Shutdown()
	defer a1.Leave()

	testutil.Yield()

	// SetTags should persist the tags to the file.
	err = a1.SetTags(tags)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// A fresh agent with the same config should load the tags back.
	a2 := testAgentWithConfig(agentConfig, serf.DefaultConfig(), nil)
	if err := a2.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	defer a2.Shutdown()
	defer a2.Leave()

	testutil.Yield()

	m := a2.Serf().LocalMember()
	if !reflect.DeepEqual(m.Tags, tags) {
		t.Fatalf("tags not restored: %#v", m.Tags)
	}
}
// TestSerf_eventsUser fires two user events from s1 and verifies s2's event
// channel receives both, with matching names and payloads.
func TestSerf_eventsUser(t *testing.T) {
	// Create the s1 config with an event channel so we can listen
	eventCh := make(chan Event, 4)
	s1Config := testConfig()
	s2Config := testConfig()
	s2Config.EventCh = eventCh

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()

	testutil.Yield()

	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Fire a user event
	if err := s1.UserEvent("event!", []byte("test"), false); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Fire a user event
	if err := s1.UserEvent("second", []byte("foobar"), false); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Verify both user events arrived at s2 in order with their payloads.
	testUserEvents(t, eventCh,
		[]string{"event!", "second"},
		[][]byte{[]byte("test"), []byte("foobar")})
}
// TestSerf_joinLeave_ltime checks Lamport-clock bookkeeping: the join is
// recorded at LTime 1, the clock advances past the join time, and a later
// leave bumps s2's clock beyond its pre-leave value.
func TestSerf_joinLeave_ltime(t *testing.T) {
	s1Config := testConfig()
	s2Config := testConfig()

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()

	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s2.Shutdown()

	testutil.Yield()

	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// s2 should have recorded s1's join at Lamport time 1.
	if s2.members[s1.config.NodeName].statusLTime != 1 {
		t.Fatalf("join time is not valid %d",
			s2.members[s1.config.NodeName].statusLTime)
	}

	// The clock must have advanced past the join's status time.
	if s2.clock.Time() <= s2.members[s1.config.NodeName].statusLTime {
		t.Fatalf("join should increment")
	}
	oldClock := s2.clock.Time()

	err = s1.Leave()
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// The leave message should push s2's clock past its pre-leave value.
	if s2.clock.Time() <= oldClock {
		t.Fatalf("leave should increment (%d / %d)",
			s2.clock.Time(), oldClock)
	}
}
// TestRPCClientGetCoordinate verifies the GetCoordinate RPC returns a
// coordinate for a known node and nil (without error) for an unknown one.
func TestRPCClientGetCoordinate(t *testing.T) {
	client, a1, ipc := testRPCClient(t)
	defer ipc.Shutdown()
	defer client.Close()
	defer a1.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	testutil.Yield()

	// The agent's own node must have a coordinate.
	coord, err := client.GetCoordinate(a1.conf.NodeName)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if coord == nil {
		t.Fatalf("should have gotten a coordinate")
	}

	// An unknown node yields a nil coordinate, not an error.
	coord, err = client.GetCoordinate("nope")
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if coord != nil {
		t.Fatalf("should have not gotten a coordinate")
	}
}
// TestRPCEndpointJoin drives the rpcEndpoint Join handler directly and
// verifies it reports one node joined and both agents see two members.
func TestRPCEndpointJoin(t *testing.T) {
	a1 := testAgent()
	a2 := testAgent()
	defer a1.Shutdown()
	defer a2.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := a2.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	e := &rpcEndpoint{agent: a1}

	// n receives the number of nodes successfully contacted.
	var n int
	s2Addr := a2.SerfConfig.MemberlistConfig.BindAddr
	err := e.Join([]string{s2Addr}, &n)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if n != 1 {
		t.Fatalf("bad n: %d", n)
	}

	testutil.Yield()

	if len(a2.Serf().Members()) != 2 {
		t.Fatalf("should have 2 members: %#v", a2.Serf().Members())
	}
}
func TestRPCClientJoin(t *testing.T) { client, a1, ipc := testRPCClient(t) a2 := testAgent(nil) defer ipc.Shutdown() defer client.Close() defer a1.Shutdown() defer a2.Shutdown() if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } if err := a2.Start(); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() n, err := client.Join([]string{a2.conf.MemberlistConfig.BindAddr}, false) if err != nil { t.Fatalf("err: %s", err) } if n != 1 { t.Fatalf("n != 1: %d", n) } }
// TestSerf_RemoveFailed_eventsLeave shuts down s2 so it fails, removes it
// with RemoveFailedNode, and expects join/failed/leave events in order.
func TestSerf_RemoveFailed_eventsLeave(t *testing.T) {
	// Create the s1 config with an event channel so we can listen
	eventCh := make(chan Event, 4)
	s1Config := testConfig()
	s1Config.EventCh = eventCh
	s2Config := testConfig()

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()

	testutil.Yield()

	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Hard-stop s2 so s1's failure detector marks it failed.
	if err := s2.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	time.Sleep(s2Config.MemberlistConfig.ProbeInterval * 3)

	if err := s1.RemoveFailedNode(s2Config.NodeName); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Now that s2 has failed and been marked as left, we check the
	// events to make sure we got a leave event in s1 about the leave.
	testEvents(t, eventCh, s2Config.NodeName,
		[]EventType{EventMemberJoin, EventMemberFailed, EventMemberLeave})
}
// TestSerf_Join_IgnoreOld verifies that joining with ignoreOld=true
// suppresses replay of user events that happened before the join.
func TestSerf_Join_IgnoreOld(t *testing.T) {
	// Create the s1 config with an event channel so we can listen
	eventCh := make(chan Event, 4)
	s1Config := testConfig()
	s2Config := testConfig()
	s2Config.EventCh = eventCh

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()

	testutil.Yield()

	// Fire two user events on s1 BEFORE s2 joins.
	if err := s1.UserEvent("event!", []byte("test"), false); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	if err := s1.UserEvent("second", []byte("foobar"), false); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// join with ignoreOld set to true! should not get events
	_, err = s2.Join([]string{s1Config.MemberlistConfig.BindAddr}, true)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// check the events to make sure we got nothing
	testUserEvents(t, eventCh, []string{}, [][]byte{})
}
func TestRPCClientStream_User(t *testing.T) { client, a1, ipc := testRPCClient(t) defer ipc.Shutdown() defer client.Close() defer a1.Shutdown() if err := a1.Start(); err != nil { t.Fatalf("err: %s", err) } eventCh := make(chan map[string]interface{}, 64) if handle, err := client.Stream("user", eventCh); err != nil { t.Fatalf("err: %s", err) } else { defer client.Stop(handle) } testutil.Yield() if err := client.UserEvent("deploy", []byte("foo"), false); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() select { case e := <-eventCh: if e["Event"].(string) != "user" { t.Fatalf("bad event: %#v", e) } if e["LTime"].(int64) != 1 { t.Fatalf("bad event: %#v", e) } if e["Name"].(string) != "deploy" { t.Fatalf("bad event: %#v", e) } if bytes.Compare(e["Payload"].([]byte), []byte("foo")) != 0 { t.Fatalf("bad event: %#v", e) } if e["Coalesce"].(bool) != false { t.Fatalf("bad event: %#v", e) } default: t.Fatalf("should have event") } }
func TestRPCClientLeave(t *testing.T) { p1 := testRPCClient(t) defer p1.Close() testutil.Yield() if err := p1.client.Leave(); err != nil { t.Fatalf("err: %s", err) } testutil.Yield() select { case <-p1.agent.ShutdownCh(): default: t.Fatalf("agent should be shutdown!") } }
// TestRPCClientMonitor checks the handle-based monitor API: a backlog line
// is available on attach and a join attempt produces a "joining" log line.
func TestRPCClientMonitor(t *testing.T) {
	client, a1, ipc := testRPCClient(t)
	defer ipc.Shutdown()
	defer client.Close()
	defer a1.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	eventCh := make(chan string, 64)
	if handle, err := client.Monitor("debug", eventCh); err != nil {
		t.Fatalf("err: %s", err)
	} else {
		defer client.Stop(handle)
	}

	testutil.Yield()

	select {
	case e := <-eventCh:
		if !strings.Contains(e, "Accepted client") {
			t.Fatalf("bad: %s", e)
		}
	default:
		t.Fatalf("should have backlog")
	}

	// Drain the rest of the messages as we know it
	drainEventCh(eventCh)

	// Join a bad thing to generate more events
	a1.Join(nil, false)

	testutil.Yield()

	select {
	case e := <-eventCh:
		if !strings.Contains(e, "joining") {
			t.Fatalf("bad: %s", e)
		}
	default:
		t.Fatalf("should have message")
	}
}
// TestSerf_InstallKey installs a new encryption key via the KeyManager and
// verifies it is broadcast to all members' keyrings without changing the
// primary key on either node.
func TestSerf_InstallKey(t *testing.T) {
	s1, err := testKeyringSerf()
	if err != nil {
		t.Fatalf("%s", err)
	}
	defer s1.Shutdown()

	s2, err := testKeyringSerf()
	if err != nil {
		t.Fatalf("%s", err)
	}
	defer s2.Shutdown()

	// Remember the primary key so we can assert it is untouched later.
	primaryKey := s1.config.MemberlistConfig.Keyring.GetPrimaryKey()

	// Join s1 and s2
	_, err = s1.Join([]string{s2.config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Begin tests
	newKey := "l4ZkaypGLT8AsB0LBldthw=="
	newKeyBytes, err := base64.StdEncoding.DecodeString(newKey)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	manager := s1.KeyManager()

	// Install a new key onto the existing ring. This is a blocking call, so no
	// need for a yield.
	_, err = manager.InstallKey(newKey)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Key installation did not affect the current primary key
	if !bytes.Equal(primaryKey, s1.config.MemberlistConfig.Keyring.GetPrimaryKey()) {
		t.Fatal("Unexpected primary key change on s1")
	}
	if !bytes.Equal(primaryKey, s2.config.MemberlistConfig.Keyring.GetPrimaryKey()) {
		t.Fatal("Unexpected primary key change on s2")
	}

	// New key was successfully broadcasted and installed on all members
	if !keyExistsInRing(s1.config.MemberlistConfig.Keyring, newKeyBytes) {
		t.Fatal("Newly-installed key not found in keyring on s1")
	}
	if !keyExistsInRing(s2.config.MemberlistConfig.Keyring, newKeyBytes) {
		t.Fatal("Newly-installed key not found in keyring on s2")
	}
}
// TestRPCEndpointUserEvent sends a user event through the rpcEndpoint and
// verifies the agent's event handler received it with the right name and
// payload.
func TestRPCEndpointUserEvent(t *testing.T) {
	a1 := testAgent()
	defer a1.Shutdown()

	handler := new(MockEventHandler)
	a1.EventHandler = handler

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	e := &rpcEndpoint{agent: a1}

	args := RPCUserEventArgs{
		Name:    "deploy",
		Payload: []byte("foo"),
	}
	if err := e.UserEvent(args, new(interface{})); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Lock the mock handler before inspecting its recorded events.
	handler.Lock()
	defer handler.Unlock()

	if len(handler.Events) == 0 {
		t.Fatal("no events")
	}

	serfEvent, ok := handler.Events[len(handler.Events)-1].(serf.UserEvent)
	if !ok {
		t.Fatalf("bad: %#v", serfEvent)
	}
	if serfEvent.Name != "deploy" {
		t.Fatalf("bad: %#v", serfEvent)
	}
	if string(serfEvent.Payload) != "foo" {
		t.Fatalf("bad: %#v", serfEvent)
	}
}
// TestRPCClientForceLeave verifies that the ForceLeave RPC marks a shut-down
// agent as left in the surviving agent's member list.
func TestRPCClientForceLeave(t *testing.T) {
	client, a1 := testRPCClient(t)
	a2 := testAgent()
	defer client.Close()
	defer a1.Shutdown()
	defer a2.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := a2.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	s2Addr := a2.SerfConfig.MemberlistConfig.BindAddr
	if _, err := a1.Join([]string{s2Addr}); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	if err := a2.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	// NOTE(review): only a Yield between shutdown and ForceLeave — a1 may
	// not yet have marked a2 failed; confirm this isn't a flake source.
	testutil.Yield()

	if err := client.ForceLeave(a2.SerfConfig.NodeName); err != nil {
		t.Fatalf("err: %s", err)
	}

	// NOTE(review): indexing m[1] assumes member ordering — confirm stable.
	m := a1.Serf().Members()
	if len(m) != 2 {
		t.Fatalf("should have 2 members: %#v", a1.Serf().Members())
	}
	if m[1].Status != serf.StatusLeft {
		t.Fatalf("should be left: %#v", m[1])
	}
}
// TestRPCEndpointForceLeave drives the rpcEndpoint ForceLeave handler
// directly and verifies the shut-down agent is reported as left.
func TestRPCEndpointForceLeave(t *testing.T) {
	a1 := testAgent()
	a2 := testAgent()
	defer a1.Shutdown()
	defer a2.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := a2.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	s2Addr := a2.SerfConfig.MemberlistConfig.BindAddr
	if _, err := a1.Join([]string{s2Addr}); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	if err := a2.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	e := &rpcEndpoint{agent: a1}

	if err := e.ForceLeave(a2.SerfConfig.NodeName, new(interface{})); err != nil {
		t.Fatalf("err: %s", err)
	}

	// NOTE(review): indexing m[1] assumes member ordering — confirm stable.
	m := a1.Serf().Members()
	if len(m) != 2 {
		t.Fatalf("should have 2 members: %#v", a1.Serf().Members())
	}
	if m[1].Status != serf.StatusLeft {
		t.Fatalf("should be left: %#v", m[1])
	}
}
// TestRPCClientMembers verifies the Members RPC: one member before joining
// a2, two members afterwards.
func TestRPCClientMembers(t *testing.T) {
	client, a1, ipc := testRPCClient(t)
	a2 := testAgent(nil)
	defer ipc.Shutdown()
	defer client.Close()
	defer a1.Shutdown()
	defer a2.Shutdown()

	if err := a1.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}
	if err := a2.Start(); err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	mem, err := client.Members()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(mem) != 1 {
		t.Fatalf("bad: %#v", mem)
	}

	_, err = client.Join([]string{a2.conf.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	mem, err = client.Members()
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if len(mem) != 2 {
		t.Fatalf("bad: %#v", mem)
	}
}
func TestCommandRun_advertiseAddr(t *testing.T) { doneCh := make(chan struct{}) shutdownCh := make(chan struct{}) defer func() { close(shutdownCh) <-doneCh }() c := &Command{ ShutdownCh: shutdownCh, Ui: new(cli.MockUi), } rpcAddr := getRPCAddr() args := []string{ "-bind", testutil.GetBindAddr().String(), "-rpc-addr", rpcAddr, "-advertise", "127.0.0.10:12345", } go func() { code := c.Run(args) if code != 0 { log.Printf("bad: %d", code) } close(doneCh) }() testutil.Yield() client, err := rpc.NewClient(rpcAddr) if err != nil { t.Fatalf("err: %s", err) } defer client.Close() members, err := client.Members() if err != nil { t.Fatalf("err: %s", err) } if len(members) != 1 { t.Fatalf("bad: %#v", members) } // Check the addr and port is as advertised! m := members[0] if bytes.Compare(m.Addr, []byte{127, 0, 0, 10}) != 0 { t.Fatalf("bad: %#v", m) } if m.Port != 12345 { t.Fatalf("bad: %#v", m) } }
// TestSerfRemoveFailedNode fails s2 in a three-node cluster, removes it with
// RemoveFailedNode, and verifies the other nodes see it as left.
func TestSerfRemoveFailedNode(t *testing.T) {
	s1Config := testConfig()
	s2Config := testConfig()
	s3Config := testConfig()

	s1, err := Create(s1Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s2, err := Create(s2Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	s3, err := Create(s3Config)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer s1.Shutdown()
	defer s2.Shutdown()
	defer s3.Shutdown()

	_, err = s1.Join([]string{s2Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	_, err = s1.Join([]string{s3Config.MemberlistConfig.BindAddr}, false)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	testutil.Yield()

	// Now force the shutdown of s2 so it appears to fail.
	if err := s2.Shutdown(); err != nil {
		t.Fatalf("err: %s", err)
	}

	time.Sleep(s2Config.MemberlistConfig.ProbeInterval * 5)

	// Verify that s2 is "failed"
	testMember(t, s1.Members(), s2Config.NodeName, StatusFailed)

	// Now remove the failed node
	if err := s1.RemoveFailedNode(s2Config.NodeName); err != nil {
		t.Fatalf("err: %s", err)
	}

	// Verify that s2 is gone
	testMember(t, s1.Members(), s2Config.NodeName, StatusLeft)
	testMember(t, s3.Members(), s2Config.NodeName, StatusLeft)
}